diff --git a/Makefile.inc1 b/Makefile.inc1
index 8838d31c795a..f740cc5abcca 100644
--- a/Makefile.inc1
+++ b/Makefile.inc1
@@ -1,3692 +1,3695 @@
#
#
# Make command line options:
# -DNO_CLEANDIR run ${MAKE} clean, instead of ${MAKE} cleandir
# -DNO_CLEAN do not clean at all
# -DDB_FROM_SRC use the user/group databases in src/etc instead of
# the system database when installing.
# -DNO_SHARE do not go into share subdir
# -DKERNFAST define NO_KERNEL{CONFIG,CLEAN,OBJ}
# -DNO_KERNELCONFIG do not run config in ${MAKE} buildkernel
# -DNO_KERNELCLEAN do not run ${MAKE} clean in ${MAKE} buildkernel
# -DNO_KERNELOBJ do not run ${MAKE} obj in ${MAKE} buildkernel
# -DNO_ROOT install without using root privilege
# -DWITHOUT_CTF do not run the DTrace CTF conversion tools on built objects
# LOCAL_DIRS="list of dirs" to add additional dirs to the SUBDIR list
# LOCAL_ITOOLS="list of tools" to add additional tools to the ITOOLS list
# LOCAL_LIB_DIRS="list of dirs" to add additional dirs to libraries target
# LOCAL_MTREE="list of mtree files" to process to allow local directories
# to be created before files are installed
# LOCAL_LEGACY_DIRS="list of dirs" to add additional dirs to the legacy
# target
# LOCAL_BSTOOL_DIRS="list of dirs" to add additional dirs to the
# bootstrap-tools target
# LOCAL_TOOL_DIRS="list of dirs" to add additional dirs to the build-tools
# target
# LOCAL_XTOOL_DIRS="list of dirs" to add additional dirs to the
# cross-tools target
# METALOG="path to metadata log" to write permission and ownership
# when NO_ROOT is set. (default: ${DESTDIR}/${DISTDIR}/METALOG,
# check /etc/make.conf for DISTDIR)
# TARGET="machine" to crossbuild world for a different machine type
# TARGET_ARCH= may be required when a TARGET supports multiple endians
# BUILDENV_SHELL= shell to launch for the buildenv target (def:${SHELL})
# WORLD_FLAGS= additional flags to pass to make(1) during buildworld
# KERNEL_FLAGS= additional flags to pass to make(1) during buildkernel
# SUBDIR_OVERRIDE="list of dirs" to build rather than everything.
# All libraries and includes, and some build tools will still build.
#
# The intended user-driven targets are:
# buildworld - rebuild *everything*, including glue to help do upgrades
# installworld - install everything built by "buildworld"
# checkworld - run test suite on installed world
# doxygen - build API documentation of the kernel
#
# Standard targets (not defined here) are documented in the makefiles in
# /usr/share/mk. These include:
# obj depend all install clean cleandepend cleanobj
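#
# Example invocations (a sketch; the TARGET/TARGET_ARCH pair, KERNCONF
# name, and DESTDIR below are illustrative assumptions, not requirements):
#   make TARGET=arm64 TARGET_ARCH=aarch64 buildworld
#   make TARGET=arm64 TARGET_ARCH=aarch64 KERNCONF=GENERIC buildkernel
#   make TARGET=arm64 TARGET_ARCH=aarch64 DESTDIR=/mnt installworld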
.if !defined(TARGET) || !defined(TARGET_ARCH)
.error Both TARGET and TARGET_ARCH must be defined.
.endif
.if make(showconfig) || make(test-system-*)
_MKSHOWCONFIG= t
.endif
SRCDIR?= ${.CURDIR}
LOCALBASE?= /usr/local
TIME_ENV ?= time env
.include "share/mk/src.tools.mk"
# Cross toolchain changes must be in effect before bsd.compiler.mk
# so that it gets the right CC, and pass CROSS_TOOLCHAIN to submakes.
.if defined(CROSS_TOOLCHAIN)
.if exists(${LOCALBASE}/share/toolchains/${CROSS_TOOLCHAIN}.mk)
.include "${LOCALBASE}/share/toolchains/${CROSS_TOOLCHAIN}.mk"
.elif exists(${CROSS_TOOLCHAIN})
.include "${CROSS_TOOLCHAIN}"
.else
.error CROSS_TOOLCHAIN ${CROSS_TOOLCHAIN} not found
.endif
CROSSENV+=CROSS_TOOLCHAIN="${CROSS_TOOLCHAIN}"
.elif defined(UNIVERSE_TOOLCHAIN)
UNIVERSE_TOOLCHAIN_PATH?=${HOST_OBJTOP}/tmp/usr/bin
XCC?="${UNIVERSE_TOOLCHAIN_PATH}/cc"
XCXX?="${UNIVERSE_TOOLCHAIN_PATH}/c++"
XCPP?="${UNIVERSE_TOOLCHAIN_PATH}/cpp"
XLD?="${UNIVERSE_TOOLCHAIN_PATH}/ld"
.endif
.if defined(CROSS_TOOLCHAIN_PREFIX)
CROSS_COMPILER_PREFIX?=${CROSS_TOOLCHAIN_PREFIX}
.endif
XCOMPILERS= CC CXX CPP
.for COMPILER in ${XCOMPILERS}
.if defined(CROSS_COMPILER_PREFIX)
X${COMPILER}?= ${CROSS_COMPILER_PREFIX}${${COMPILER}}
.else
X${COMPILER}?= ${${COMPILER}}
.endif
.endfor
# If a full path to an external cross compiler is given, don't build
# a cross compiler.
.if ${XCC:N${CCACHE_BIN}:M/*}
MK_CLANG_BOOTSTRAP= no
# Make sure sub-makes see the option as disabled so the hack in bsd.sys.mk to
# work around incompatible headers in Clang's resource directory is enabled.
.MAKEOVERRIDES+= MK_CLANG_BOOTSTRAP
.endif
# Pull in compiler metadata from buildworld/toolchain if possible to avoid
# running CC from bsd.compiler.mk.
.if make(installworld) || make(install) || make(distributeworld) || \
make(stageworld)
.-include "${OBJTOP}/toolchain-metadata.mk"
.if !defined(_LOADED_TOOLCHAIN_METADATA)
.error A build is required first. You may have the wrong MAKEOBJDIRPREFIX set.
.endif
.endif
# Pull in COMPILER_TYPE and COMPILER_FREEBSD_VERSION early. Pull it from the
# tree to be friendlier to foreign OS builds. It's safe to do so unconditionally
# here since we will always have the right make, unlike in src/Makefile.
# Don't include bsd.linker.mk yet until XBINUTILS is handled (after src.opts.mk)
_NO_INCLUDE_LINKERMK= t
# We also want the X_COMPILER* variables if we are using an external toolchain.
_WANT_TOOLCHAIN_CROSS_VARS= t
.include "share/mk/bsd.compiler.mk"
.undef _NO_INCLUDE_LINKERMK
.undef _WANT_TOOLCHAIN_CROSS_VARS
# src.opts.mk depends on COMPILER_FEATURES
.include "share/mk/src.opts.mk"
.if ${TARGET} == ${MACHINE}
TARGET_CPUTYPE?=${CPUTYPE}
.else
TARGET_CPUTYPE?=
.endif
.if !empty(TARGET_CPUTYPE)
_TARGET_CPUTYPE=${TARGET_CPUTYPE}
.else
_TARGET_CPUTYPE=dummy
.endif
.if ${TARGET} == "arm"
.if ${TARGET_CPUTYPE:M*soft*} == ""
TARGET_TRIPLE_ABI= gnueabihf
.else
TARGET_TRIPLE_ABI= gnueabi
.endif
.endif
MACHINE_TRIPLE_ABI?= unknown
MACHINE_TRIPLE?=${MACHINE_ARCH:S/amd64/x86_64/}-${MACHINE_TRIPLE_ABI}-freebsd${OS_REVISION}
TARGET_TRIPLE_ABI?= unknown
TARGET_TRIPLE?= ${TARGET_ARCH:S/amd64/x86_64/}-${TARGET_TRIPLE_ABI}-freebsd${OS_REVISION}
KNOWN_ARCHES?= aarch64/arm64 \
amd64 \
armv6/arm \
armv7/arm \
i386 \
powerpc \
powerpc64/powerpc \
powerpc64le/powerpc \
powerpcspe/powerpc \
riscv64/riscv
.if ${TARGET} == ${TARGET_ARCH}
_t= ${TARGET}
.else
_t= ${TARGET_ARCH}/${TARGET}
.endif
.for _t in ${_t}
.if empty(KNOWN_ARCHES:M${_t})
.error Unknown target ${TARGET_ARCH}:${TARGET}.
.endif
.endfor
.if ${TARGET_ARCH} == "amd64"
LIBCOMPAT_INCLUDE_DIRS+= i386
.elif ${TARGET_ARCH} == "aarch64"
LIBCOMPAT_INCLUDE_DIRS+= arm
.endif
.if ${.MAKE.OS} != "FreeBSD"
CROSSBUILD_HOST=${.MAKE.OS}
.if ${.MAKE.OS} != "Linux" && ${.MAKE.OS} != "Darwin"
.warning Unsupported crossbuild system: ${.MAKE.OS}. Build will probably fail!
.endif
# We need to force NO_ROOT/DB_FROM_SRC builds when building on other operating
# systems since the BSD.foo.dist specs contain users and groups that do not
# exist by default on a Linux/macOS system.
NO_ROOT:= 1
DB_FROM_SRC:= 1
.export NO_ROOT
.endif
# If all targets are disabled for system llvm then don't expect it to work
# for cross-builds.
.if !defined(TOOLS_PREFIX) && ${MK_LLVM_TARGET_ALL} == "no" && \
${MACHINE} != ${TARGET} && ${MACHINE_ARCH} != ${TARGET_ARCH} && \
!make(showconfig)
MK_SYSTEM_COMPILER= no
MK_SYSTEM_LINKER= no
.endif
# Handle external binutils.
.if defined(CROSS_TOOLCHAIN_PREFIX)
CROSS_BINUTILS_PREFIX?=${CROSS_TOOLCHAIN_PREFIX}
.endif
XBINUTILS= AS AR ELFCTL LD NM OBJCOPY RANLIB SIZE STRINGS STRIPBIN
.for BINUTIL in ${XBINUTILS}
.if defined(CROSS_BINUTILS_PREFIX) && \
exists(${CROSS_BINUTILS_PREFIX}/${${BINUTIL}})
X${BINUTIL}?= ${CROSS_BINUTILS_PREFIX:C,/*$,,}/${${BINUTIL}}
.else
X${BINUTIL}?= ${${BINUTIL}}
.endif
.endfor
# If a full path to an external linker is given, don't build lld.
.if ${XLD:M/*}
MK_LLD_BOOTSTRAP= no
.endif
# We also want the X_LINKER* variables if we are using an external toolchain.
_WANT_TOOLCHAIN_CROSS_VARS= t
.include "share/mk/bsd.linker.mk"
.undef _WANT_TOOLCHAIN_CROSS_VARS
# Begin WITH_SYSTEM_COMPILER / WITH_SYSTEM_LD
# WITH_SYSTEM_COMPILER - Pull in needed values and make a decision.
# Check if there is a local compiler that can serve as the external compiler.
# Which compiler is expected to be used?
.if ${MK_CLANG_BOOTSTRAP} == "yes"
WANT_COMPILER_TYPE= clang
.else
WANT_COMPILER_TYPE=
.endif
.if !defined(WANT_COMPILER_FREEBSD_VERSION) && !make(showconfig) && \
!make(test-system-linker)
.if ${WANT_COMPILER_TYPE} == "clang"
WANT_COMPILER_FREEBSD_VERSION_FILE= lib/clang/freebsd_cc_version.h
WANT_COMPILER_FREEBSD_VERSION!= \
awk '$$2 == "FREEBSD_CC_VERSION" {printf("%d\n", $$3)}' \
${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown
WANT_COMPILER_VERSION_FILE= lib/clang/include/clang/Basic/Version.inc
WANT_COMPILER_VERSION!= \
awk '$$2 == "CLANG_VERSION" {split($$3, a, "."); print a[1] * 10000 + a[2] * 100 + a[3]}' \
${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown
.endif
.export WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_VERSION
.endif # !defined(WANT_COMPILER_FREEBSD_VERSION)
# The system compiler needs to be the same revision as we would build for the
# bootstrap. If the expected compiler differs from CC then we can't skip
# bootstrapping.
# GCC cannot be used for cross-arch yet. For clang we pass -target later if
# TARGET_ARCH!=MACHINE_ARCH.
.if ${MK_SYSTEM_COMPILER} == "yes" && \
defined(WANT_COMPILER_FREEBSD_VERSION) && \
${MK_CLANG_BOOTSTRAP} == "yes" && \
!make(xdev*) && \
${X_COMPILER_TYPE} == ${WANT_COMPILER_TYPE} && \
(${X_COMPILER_TYPE} == "clang" || ${TARGET_ARCH} == ${MACHINE_ARCH}) && \
${X_COMPILER_VERSION} == ${WANT_COMPILER_VERSION} && \
${X_COMPILER_FREEBSD_VERSION} == ${WANT_COMPILER_FREEBSD_VERSION}
# Everything matches, disable the bootstrap compiler.
MK_CLANG_BOOTSTRAP= no
USING_SYSTEM_COMPILER= yes
.endif # ${WANT_COMPILER_TYPE} == ${COMPILER_TYPE}
# WITH_SYSTEM_LD - Pull in needed values and make a decision.
# Check if there is a local linker that can serve as the external linker.
# Which linker is expected to be used?
.if ${MK_LLD_BOOTSTRAP} == "yes"
WANT_LINKER_TYPE= lld
.else
WANT_LINKER_TYPE=
.endif
.if !defined(WANT_LINKER_FREEBSD_VERSION) && !make(showconfig) && \
!make(test-system-compiler)
.if ${WANT_LINKER_TYPE} == "lld"
WANT_LINKER_FREEBSD_VERSION_FILE= lib/clang/include/lld/Common/Version.inc
WANT_LINKER_FREEBSD_VERSION!= \
awk '$$2 == "LLD_FREEBSD_VERSION" {print $$3}' \
${SRCDIR}/${WANT_LINKER_FREEBSD_VERSION_FILE} || echo unknown
WANT_LINKER_VERSION_FILE= lib/clang/include/lld/Common/Version.inc
WANT_LINKER_VERSION!= \
awk '$$2 == "LLD_VERSION_STRING" {gsub("\"", "", $$3); split($$3, a, "."); print a[1] * 10000 + a[2] * 100 + a[3]}' \
${SRCDIR}/${WANT_LINKER_VERSION_FILE} || echo unknown
.else
WANT_LINKER_FREEBSD_VERSION_FILE=
WANT_LINKER_FREEBSD_VERSION=
.endif
.export WANT_LINKER_FREEBSD_VERSION WANT_LINKER_VERSION
.endif # !defined(WANT_LINKER_FREEBSD_VERSION)
.if ${MK_SYSTEM_LINKER} == "yes" && \
defined(WANT_LINKER_FREEBSD_VERSION) && \
(${MK_LLD_BOOTSTRAP} == "yes") && \
!make(xdev*) && \
${X_LINKER_TYPE} == ${WANT_LINKER_TYPE} && \
${X_LINKER_VERSION} == ${WANT_LINKER_VERSION} && \
${X_LINKER_FREEBSD_VERSION} == ${WANT_LINKER_FREEBSD_VERSION}
# Everything matches, disable the bootstrap linker.
MK_LLD_BOOTSTRAP= no
USING_SYSTEM_LINKER= yes
.endif # ${WANT_LINKER_TYPE} == ${LINKER_TYPE}
# WITH_SYSTEM_COMPILER / WITH_SYSTEM_LINKER - Handle defaults and debug.
USING_SYSTEM_COMPILER?= no
USING_SYSTEM_LINKER?= no
TEST_SYSTEM_COMPILER_VARS= \
USING_SYSTEM_COMPILER MK_SYSTEM_COMPILER \
MK_CROSS_COMPILER MK_CLANG_BOOTSTRAP \
WANT_COMPILER_TYPE WANT_COMPILER_VERSION WANT_COMPILER_VERSION_FILE \
WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_FREEBSD_VERSION_FILE \
CC COMPILER_TYPE COMPILER_FEATURES COMPILER_VERSION \
COMPILER_FREEBSD_VERSION \
XCC X_COMPILER_TYPE X_COMPILER_FEATURES X_COMPILER_VERSION \
X_COMPILER_FREEBSD_VERSION
TEST_SYSTEM_LINKER_VARS= \
USING_SYSTEM_LINKER MK_SYSTEM_LINKER \
MK_LLD_BOOTSTRAP \
WANT_LINKER_TYPE WANT_LINKER_VERSION WANT_LINKER_VERSION_FILE \
WANT_LINKER_FREEBSD_VERSION WANT_LINKER_FREEBSD_VERSION_FILE \
LD LINKER_TYPE LINKER_FEATURES LINKER_VERSION \
LINKER_FREEBSD_VERSION \
XLD X_LINKER_TYPE X_LINKER_FEATURES X_LINKER_VERSION \
X_LINKER_FREEBSD_VERSION
.for _t in compiler linker
test-system-${_t}: .PHONY
.for v in ${TEST_SYSTEM_${_t:tu}_VARS}
${_+_}@printf "%-35s= %s\n" "${v}" "${${v}}"
.endfor
.endfor
.if (make(buildworld) || make(buildkernel) || make(kernel-toolchain) || \
make(toolchain) || make(_cross-tools))
.if ${USING_SYSTEM_COMPILER} == "yes"
.info SYSTEM_COMPILER: Determined that CC=${CC} matches the source tree. Not bootstrapping a cross-compiler.
.elif ${MK_CLANG_BOOTSTRAP} == "yes"
.info SYSTEM_COMPILER: libclang will be built for bootstrapping a cross-compiler.
.endif
.if ${USING_SYSTEM_LINKER} == "yes"
.info SYSTEM_LINKER: Determined that LD=${LD} matches the source tree. Not bootstrapping a cross-linker.
.elif ${MK_LLD_BOOTSTRAP} == "yes"
.info SYSTEM_LINKER: libclang will be built for bootstrapping a cross-linker.
.endif
.endif
# End WITH_SYSTEM_COMPILER / WITH_SYSTEM_LD
# Store some compiler metadata for use in installworld where we don't
# want to invoke CC at all.
_TOOLCHAIN_METADATA_VARS= COMPILER_VERSION \
COMPILER_TYPE \
COMPILER_FEATURES \
COMPILER_FREEBSD_VERSION \
COMPILER_RESOURCE_DIR \
LINKER_VERSION \
LINKER_FEATURES \
LINKER_TYPE \
LINKER_FREEBSD_VERSION
toolchain-metadata.mk: .PHONY .META
@: > ${.TARGET}
@echo ".info Using cached toolchain metadata from build at $$(hostname) on $$(date)" \
> ${.TARGET}
@echo "_LOADED_TOOLCHAIN_METADATA=t" >> ${.TARGET}
.for v in ${_TOOLCHAIN_METADATA_VARS}
@echo "${v}=${${v}}" >> ${.TARGET}
@echo "X_${v}=${X_${v}}" >> ${.TARGET}
.endfor
@echo ".export ${_TOOLCHAIN_METADATA_VARS}" >> ${.TARGET}
@echo ".export ${_TOOLCHAIN_METADATA_VARS:C,^,X_,}" >> ${.TARGET}
# We must do lib/ and libexec/ before bin/ in case of a mid-install error to
# keep the users system reasonably usable. For static->dynamic root upgrades,
# we don't want to install a dynamic binary without rtld and the needed
# libraries. More commonly, for dynamic root, we don't want to install a
# binary that requires a newer library version that hasn't been installed yet.
# This ordering is not a guarantee though. The only guarantee of a working
# system here would require fine-grained ordering of all components based
# on their dependencies.
.if !empty(SUBDIR_OVERRIDE)
SUBDIR= ${SUBDIR_OVERRIDE}
.else
SUBDIR= lib libexec
# Add LOCAL_LIB_DIRS, but only if they will not be picked up as a SUBDIR
# of a LOCAL_DIRS directory. This allows LOCAL_DIRS=foo and
# LOCAL_LIB_DIRS=foo/lib to behave as expected.
.for _DIR in ${LOCAL_DIRS:M*/} ${LOCAL_DIRS:N*/:S|$|/|}
_REDUNDANT_LIB_DIRS+= ${LOCAL_LIB_DIRS:M${_DIR}*}
.endfor
.for _DIR in ${LOCAL_LIB_DIRS}
.if ${_DIR} == ".WAIT" || (empty(_REDUNDANT_LIB_DIRS:M${_DIR}) && exists(${.CURDIR}/${_DIR}/Makefile))
SUBDIR+= ${_DIR}
.endif
.endfor
.if !defined(NO_ROOT) && (make(installworld) || make(install))
# Ensure libraries are installed before progressing.
SUBDIR+=.WAIT
.endif
SUBDIR+=bin
.if ${MK_CDDL} != "no"
SUBDIR+=cddl
.endif
SUBDIR+=gnu include
.if ${MK_KERBEROS} != "no"
SUBDIR+=kerberos5
.endif
.if ${MK_RESCUE} != "no"
SUBDIR+=rescue
.endif
SUBDIR+=sbin
.if ${MK_CRYPT} != "no"
SUBDIR+=secure
.endif
.if !defined(NO_SHARE)
SUBDIR+=share
.endif
.if ${MK_BOOT} != "no"
SUBDIR+=stand
.endif
SUBDIR+=sys usr.bin usr.sbin
.if ${MK_TESTS} != "no"
SUBDIR+= tests
.endif
# Local directories are built in parallel with the base system directories.
# Users may insert a .WAIT directive at the beginning or elsewhere within
# the LOCAL_DIRS and LOCAL_LIB_DIRS lists as needed.
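# Example (a sketch; the directory name is an illustrative assumption):
#   LOCAL_DIRS=".WAIT mytools" defers building mytools until the base
#   system SUBDIRs listed before it have finished.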
.for _DIR in ${LOCAL_DIRS}
.if ${_DIR} == ".WAIT" || exists(${.CURDIR}/${_DIR}/Makefile)
SUBDIR+= ${_DIR}
.endif
.endfor
# We must do etc/ last as it hooks into building the man whatis file
# by calling 'makedb' in share/man. This is only relevant for
# install/distribute so they build the whatis file after every manpage is
# installed.
.if make(installworld) || make(install)
SUBDIR+=.WAIT
.endif
SUBDIR+=etc
.endif # !empty(SUBDIR_OVERRIDE)
.if defined(NOCLEAN)
.warning The src.conf WITHOUT_CLEAN option can now be used instead of NOCLEAN.
MK_CLEAN:= no
.endif
.if defined(NO_CLEAN)
.info The src.conf WITHOUT_CLEAN option can now be used instead of NO_CLEAN.
MK_CLEAN:= no
.endif
.if defined(NO_CLEANDIR)
CLEANDIR= clean cleandepend
.else
CLEANDIR= cleandir
.endif
.if defined(WORLDFAST)
MK_CLEAN:= no
NO_OBJWALK= t
.endif
.if ${MK_META_MODE} == "yes"
# If filemon is used then we can rely on the build being incremental-safe.
# The .meta files will also track the build command and rebuild should
# it change.
.if empty(.MAKE.MODE:Mnofilemon)
MK_CLEAN:= no
.endif
.endif
.if defined(NO_OBJWALK) || ${MK_AUTO_OBJ} == "yes"
NO_OBJWALK= t
NO_KERNELOBJ= t
.endif
.if !defined(NO_OBJWALK)
_obj= obj
.endif
LOCAL_TOOL_DIRS?=
PACKAGEDIR?= ${DESTDIR}/${DISTDIR}
.if empty(SHELL:M*csh*)
BUILDENV_SHELL?=${SHELL}
.else
BUILDENV_SHELL?=/bin/sh
.endif
.if !defined(_MKSHOWCONFIG)
.if !defined(VCS_REVISION) || empty(VCS_REVISION)
.if !defined(SVNVERSION_CMD) || empty(SVNVERSION_CMD)
. for _D in ${PATH:S,:, ,g}
. if exists(${_D}/svnversion)
SVNVERSION_CMD?=${_D}/svnversion
. endif
. if exists(${_D}/svnliteversion)
SVNVERSION_CMD?=${_D}/svnliteversion
. endif
. endfor
.endif
.if defined(SVNVERSION_CMD) && !empty(SVNVERSION_CMD)
_VCS_REVISION?= $$(eval ${SVNVERSION_CMD} ${SRCDIR})
. if !empty(_VCS_REVISION)
VCS_REVISION= $$(echo r${_VCS_REVISION})
.export VCS_REVISION
. endif
.endif
.endif
.if !defined(GIT_CMD) || empty(GIT_CMD)
. for _P in /usr/bin /usr/local/bin
. if exists(${_P}/git)
GIT_CMD= ${_P}/git
. endif
. endfor
.export GIT_CMD
.endif
.if !defined(OSRELDATE)
.if exists(/usr/include/osreldate.h)
OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \
/usr/include/osreldate.h
.else
OSRELDATE= 0
.endif
.export OSRELDATE
.endif
# Set VERSION for CTFMERGE to use via the default CTFFLAGS=-L VERSION.
.for _V in BRANCH REVISION TYPE
.if !defined(_${_V})
_${_V}!= eval $$(awk '/^${_V}=/{print}' ${SRCTOP}/sys/conf/newvers.sh); echo $$${_V}
.export _${_V}
.endif
.endfor
.if !defined(SRCRELDATE)
SRCRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \
${SRCDIR}/sys/sys/param.h
.export SRCRELDATE
.endif
.if !defined(VERSION)
VERSION= FreeBSD ${_REVISION}-${_BRANCH:C/-p[0-9]+$//} ${TARGET_ARCH} ${SRCRELDATE}
.export VERSION
.endif
MAJOR_REVISION= ${_REVISION:R}
.if !defined(PKG_VERSION)
_STRTIMENOW= %Y%m%d%H%M%S
_TIMENOW= ${_STRTIMENOW:gmtime}
.if ${_BRANCH:MCURRENT*} || ${_BRANCH:MSTABLE*} || ${_BRANCH:MPRERELEASE*}
EXTRA_REVISION= .snap${_TIMENOW}
.elif ${_BRANCH:MALPHA*}
EXTRA_REVISION= .a${_BRANCH:C/ALPHA([0-9]+).*/\1/}.${_TIMENOW}
.elif ${_BRANCH:MBETA*}
EXTRA_REVISION= .b${_BRANCH:C/BETA([0-9]+).*/\1/}.${_TIMENOW}
.elif ${_BRANCH:MRC*}
EXTRA_REVISION= .rc${_BRANCH:C/RC([0-9]+).*/\1/}.${_TIMENOW}
.elif ${_BRANCH:M*-p*}
EXTRA_REVISION= p${_BRANCH:C/.*-p([0-9]+$)/\1/}
.endif
PKG_VERSION:= ${MAJOR_REVISION}${EXTRA_REVISION:C/[[:space:]]//g}
.endif
.endif # !defined(_MKSHOWCONFIG)
.if !defined(PKG_TIMESTAMP)
TIMEEPOCHNOW= %s
SOURCE_DATE_EPOCH= ${TIMEEPOCHNOW:gmtime}
.else
SOURCE_DATE_EPOCH= ${PKG_TIMESTAMP}
.endif
PKG_NAME_PREFIX?= FreeBSD
PKG_MAINTAINER?= re@FreeBSD.org
PKG_WWW?= https://www.FreeBSD.org
.export PKG_NAME_PREFIX
.export PKG_MAINTAINER
.export PKG_WWW
.if !defined(_MKSHOWCONFIG)
_CPUTYPE!= MAKEFLAGS= CPUTYPE=${_TARGET_CPUTYPE} ${MAKE} -f /dev/null \
-m ${.CURDIR}/share/mk MK_AUTO_OBJ=no -V CPUTYPE
.if ${_CPUTYPE} != ${_TARGET_CPUTYPE}
.error CPUTYPE global should be set with ?=.
.endif
.endif
.if make(buildworld)
BUILD_ARCH!= uname -p
# On some Linux systems uname -p returns "unknown" so skip this check there.
# This check only exists to tell people to use TARGET_ARCH instead of
# MACHINE_ARCH so skipping it when crossbuilding on non-FreeBSD should be fine.
.if ${MACHINE_ARCH} != ${BUILD_ARCH} && ${.MAKE.OS} == "FreeBSD"
.error To cross-build, set TARGET_ARCH.
.endif
.endif
WORLDTMP?= ${OBJTOP}/tmp
BPATH= ${CCACHE_WRAPPER_PATH_PFX}${WORLDTMP}/legacy/usr/sbin:${WORLDTMP}/legacy/usr/bin:${WORLDTMP}/legacy/bin:${WORLDTMP}/legacy/usr/libexec
XPATH= ${WORLDTMP}/bin:${WORLDTMP}/usr/sbin:${WORLDTMP}/usr/bin
# When building we want to find the cross tools before the host tools in ${BPATH}.
# We also need to add UNIVERSE_TOOLCHAIN_PATH so that we can find the shared
# toolchain files (clang, lld, etc.) during make universe/tinderbox
STRICTTMPPATH= ${XPATH}:${BPATH}:${UNIVERSE_TOOLCHAIN_PATH}
# We should not be using tools from /usr/bin accidentally since this could cause
# the build to break on other systems that don't have that tool. For now we
# still allow using the old behaviour (inheriting $PATH) if
# BUILD_WITH_STRICT_TMPPATH is set to 0 but this will eventually be removed.
# Currently strict $PATH can cause build failures. Once the remaining issues
# have been resolved it will be turned on by default.
BUILD_WITH_STRICT_TMPPATH?=0
.if defined(CROSSBUILD_HOST)
# When building on non-FreeBSD we can't rely on the tools in /usr/bin being compatible
# with what FreeBSD expects. Therefore we only use tools from STRICTTMPPATH
# during the world build stage. We build most tools during the bootstrap-tools
# phase but symlink host tools that are known to work instead of building them.
BUILD_WITH_STRICT_TMPPATH:=1
.endif
.if ${BUILD_WITH_STRICT_TMPPATH} != 0
TMPPATH= ${STRICTTMPPATH}
.else
TMPPATH= ${STRICTTMPPATH}:${PATH}
.endif
#
# Avoid running mktemp(1) unless actually needed.
# It may not be functional, e.g., due to new ABI
# when in the middle of installing over this system.
#
.if make(distributeworld) || make(installworld) || make(stageworld)
.if ${BUILD_WITH_STRICT_TMPPATH} != 0
MKTEMP=${WORLDTMP}/legacy/usr/bin/mktemp
.if !exists(${MKTEMP})
.error mktemp binary doesn't exist in expected location: ${MKTEMP}
.endif
.else
MKTEMP=mktemp
.endif
INSTALLTMP!= ${MKTEMP} -d -u -t install
.if ${.MAKE.OS} == "FreeBSD"
# When building on FreeBSD we always copy the host tools into INSTALLTMP
# instead of symlinking them to avoid issues with incompatible libraries
# (see r364030).
# Note: we could create links if we don't intend to update the current machine.
INSTALLTMP_COPY_HOST_TOOL=cp
.else
# However, this is not necessary on Linux/macOS. Additionally, copying the host
# tools to another directory with cp results in AMFI Launch Constraint
# Violations on macOS Ventura as part of its System Integrity Protection.
INSTALLTMP_COPY_HOST_TOOL=ln -s
.endif
.endif
.if make(stagekernel) || make(distributekernel)
TAGS+= kernel
PACKAGE= kernel
.endif
#
# Building a world goes through the following stages
#
# 1. legacy stage [BMAKE]
# This stage is responsible for creating compatibility
# shims that are needed by the bootstrap-tools,
# build-tools and cross-tools stages. These are generally
# APIs that tools from one of those three stages need to
# build that aren't present on the host.
# 1. bootstrap-tools stage [BMAKE]
# This stage is responsible for creating programs that
# are needed for backward compatibility reasons. They
# are not built as cross-tools.
# 2. build-tools stage [TMAKE]
# This stage is responsible for creating the object
# tree and building any tools that are needed during
# the build process. Some programs are listed during
# this phase because they build binaries to generate
# files needed to build these programs. This stage also
# builds the 'build-tools' target rather than 'all'.
# 3. cross-tools stage [XMAKE]
# This stage is responsible for creating any tools that
# are needed for building the system. A cross-compiler is one
# of them. This differs from build tools in two ways:
# 1. the 'all' target is built rather than 'build-tools'
# 2. these tools are installed into TMPPATH for stage 4.
# 4. world stage [WMAKE]
# This stage actually builds the world.
# 5. install stage (optional) [IMAKE]
# This stage installs a previously built world.
#
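# A rough mapping of the stages above onto the internal targets chained
# together by WMAKE_TGTS further below (see the buildworld target):
#   legacy          -> _legacy           [BMAKE]
#   bootstrap-tools -> _bootstrap-tools  [BMAKE]
#   build-tools     -> _build-tools      [TMAKE]
#   cross-tools     -> _cross-tools      [XMAKE]
#   world           -> _includes, _libraries, everything [WMAKE]
#   install         -> installworld      [IMAKE]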
BOOTSTRAPPING?= 0
# Keep these in sync
MINIMUM_SUPPORTED_OSREL?= 1104001
MINIMUM_SUPPORTED_REL?= 11.4
# Common environment for world related stages
CROSSENV+= \
MACHINE_ARCH=${TARGET_ARCH} \
MACHINE=${TARGET} \
CPUTYPE=${TARGET_CPUTYPE}
.if ${MK_META_MODE} != "no"
# Don't rebuild build-tools targets during normal build.
CROSSENV+= BUILD_TOOLS_META=.NOMETA
.endif
.if defined(TARGET_CFLAGS)
CROSSENV+= ${TARGET_CFLAGS}
.endif
.if (${TARGET} != ${MACHINE} && !defined(WITH_LOCAL_MODULES)) || \
defined(WITHOUT_LOCAL_MODULES)
CROSSENV+= LOCAL_MODULES=
.endif
BOOTSTRAPPING_OSRELDATE?=${OSRELDATE}
# bootstrap-tools stage
BMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \
TOOLS_PREFIX=${TOOLS_PREFIX_UNDEF:U${WORLDTMP}} \
PATH=${BPATH:Q}:${PATH:Q} \
WORLDTMP=${WORLDTMP} \
MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}"
# need to keep this in sync with targets/pseudo/bootstrap-tools/Makefile
BSARGS= DESTDIR= \
OBJTOP='${WORLDTMP}/obj-tools' \
OBJROOT='$${OBJTOP}/' \
UNIVERSE_TOOLCHAIN_PATH=${UNIVERSE_TOOLCHAIN_PATH} \
MAKEOBJDIRPREFIX= \
BOOTSTRAPPING=${BOOTSTRAPPING_OSRELDATE} \
BWPHASE=${.TARGET:C,^_,,} \
-DNO_CPU_CFLAGS \
-DNO_LINT \
-DNO_PIC \
-DNO_SHARED \
MK_ASAN=no \
MK_CTF=no \
MK_CLANG_EXTRAS=no \
MK_CLANG_FORMAT=no \
MK_CLANG_FULL=no \
MK_HTML=no \
MK_MAN=no \
MK_PROFILE=no \
MK_RETPOLINE=no \
MK_SSP=no \
MK_TESTS=no \
MK_UBSAN=no \
MK_WERROR=no \
MK_INCLUDES=yes \
MK_MAN_UTILS=yes
BMAKE= \
${TIME_ENV} ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \
${BSARGS}
.if empty(.MAKEOVERRIDES:MMK_LLVM_TARGET_ALL)
BMAKE+= MK_LLVM_TARGET_ALL=no
.endif
# build-tools stage
TMAKE= \
${TIME_ENV} ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \
TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \
DESTDIR= \
BOOTSTRAPPING=${BOOTSTRAPPING_OSRELDATE} \
BWPHASE=${.TARGET:C,^_,,} \
-DNO_CPU_CFLAGS \
-DNO_LINT \
MK_ASAN=no \
MK_CTF=no \
MK_CLANG_EXTRAS=no \
MK_CLANG_FORMAT=no \
MK_CLANG_FULL=no \
MK_LLDB=no \
MK_RETPOLINE=no \
MK_SSP=no \
MK_TESTS=no \
MK_UBSAN=no \
MK_WERROR=no
# cross-tools stage
# TOOLS_PREFIX set in BMAKE
XMAKE= ${BMAKE} \
TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \
MK_LLDB=no \
MK_LLVM_BINUTILS=no \
MK_TESTS=no
# kernel-tools stage
KTMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \
PATH=${BPATH:Q}:${PATH:Q} \
WORLDTMP=${WORLDTMP} \
MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}"
KTMAKE= ${TIME_ENV} \
TOOLS_PREFIX=${TOOLS_PREFIX_UNDEF:U${WORLDTMP}} \
${KTMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \
DESTDIR= \
OBJTOP='${WORLDTMP}/obj-kernel-tools' \
OBJROOT='$${OBJTOP}/' \
UNIVERSE_TOOLCHAIN_PATH=${UNIVERSE_TOOLCHAIN_PATH} \
MAKEOBJDIRPREFIX= \
BOOTSTRAPPING=${BOOTSTRAPPING_OSRELDATE} \
-DNO_CPU_CFLAGS \
-DNO_LINT \
-DNO_PIC \
-DNO_SHARED \
MK_CTF=no \
MK_HTML=no \
MK_MAN=no \
MK_PROFILE=no \
MK_SSP=no \
MK_RETPOLINE=no \
MK_WERROR=no
# world stage
WMAKEENV= ${CROSSENV} \
INSTALL="${INSTALL_CMD} -U" \
PATH=${TMPPATH:Q} \
SYSROOT=${WORLDTMP}
# make hierarchy
HMAKE= PATH=${TMPPATH:Q} ${MAKE} LOCAL_MTREE=${LOCAL_MTREE:Q}
.if defined(NO_ROOT)
HMAKE+= PATH=${TMPPATH:Q} METALOG=${METALOG} -DNO_ROOT
.endif
CROSSENV+= CC="${XCC} ${XCFLAGS}" CXX="${XCXX} ${XCXXFLAGS} ${XCFLAGS}" \
CPP="${XCPP} ${XCFLAGS}" \
AS="${XAS}" AR="${XAR}" ELFCTL="${XELFCTL}" LD="${XLD}" \
LLVM_LINK="${XLLVM_LINK}" NM=${XNM} OBJCOPY="${XOBJCOPY}" \
RANLIB=${XRANLIB} STRINGS=${XSTRINGS} \
SIZE="${XSIZE}" STRIPBIN="${XSTRIPBIN}"
.if defined(CROSS_BINUTILS_PREFIX) && exists(${CROSS_BINUTILS_PREFIX})
# In the case of xdev-build tools, CROSS_BINUTILS_PREFIX won't be a
# directory, but the compiler will look in the right place for its
# tools so we don't need to tell it where to look.
BFLAGS+= -B${CROSS_BINUTILS_PREFIX}
.endif
# The internal bootstrap compiler has a default sysroot set by TOOLS_PREFIX
# and target set by TARGET/TARGET_ARCH. However, there are several needs to
# always pass an explicit --sysroot and -target.
# - External compiler needs sysroot and target flags.
# - External ld needs sysroot.
# - To be clear about the use of a sysroot when using the internal compiler.
# - Easier debugging.
# - Allowing WITH_SYSTEM_COMPILER+WITH_META_MODE to work together due to
# the flip-flopping build command when sometimes using external and
# sometimes using internal.
# - Allow using lld which has no support for default paths.
.if !defined(CROSS_BINUTILS_PREFIX) || !exists(${CROSS_BINUTILS_PREFIX})
BFLAGS+= -B${WORLDTMP}/usr/bin
.endif
.if ${WANT_COMPILER_TYPE} == gcc || \
(defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc)
.elif ${WANT_COMPILER_TYPE} == clang || \
(defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == clang)
XCFLAGS+= -target ${TARGET_TRIPLE}
.endif
XCFLAGS+= --sysroot=${WORLDTMP}
.if !empty(BFLAGS)
XCFLAGS+= ${BFLAGS}
.endif
.include "share/mk/bsd.compat.pre.mk"
.for LIBCOMPAT in ${_ALL_LIBCOMPATS}
.if ${MK_LIB${LIBCOMPAT}} == "yes"
_LIBCOMPATS+= ${LIBCOMPAT}
.endif
.endfor
.include "Makefile.libcompat"
# META_MODE normally ignores host file changes since every build updates
# timestamps (see NO_META_IGNORE_HOST in sys.mk). There are known times
# when the ABI breaks though that we want to force rebuilding WORLDTMP
# to get updated host tools.
.if ${MK_META_MODE} == "yes" && ${MK_CLEAN} == "no" && \
!defined(NO_META_IGNORE_HOST) && !defined(NO_META_IGNORE_HOST_HEADERS) && \
!defined(_MKSHOWCONFIG)
# r318736 - ino64 major ABI breakage
META_MODE_BAD_ABI_VERS+= 1200031
.if !defined(OBJDIR_HOST_OSRELDATE)
.if exists(${OBJTOP}/host-osreldate.h)
OBJDIR_HOST_OSRELDATE!= \
awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \
${OBJTOP}/host-osreldate.h
.elif exists(${WORLDTMP}/usr/include/osreldate.h)
OBJDIR_HOST_OSRELDATE= 0
.endif
.export OBJDIR_HOST_OSRELDATE
.endif
# Note that this logic is the opposite of normal BOOTSTRAP handling. We want
# to compare the WORLDTMP's OSRELDATE to the host's OSRELDATE. If the WORLDTMP
# is older than the ABI-breakage OSRELDATE of the HOST then we rebuild.
.if defined(OBJDIR_HOST_OSRELDATE)
.for _ver in ${META_MODE_BAD_ABI_VERS}
.if ${OSRELDATE} >= ${_ver} && ${OBJDIR_HOST_OSRELDATE} < ${_ver}
_meta_mode_need_rebuild= ${_ver}
.endif
.endfor
.if defined(_meta_mode_need_rebuild)
.info META_MODE: Rebuilding host tools due to ABI breakage in __FreeBSD_version ${_meta_mode_need_rebuild}.
NO_META_IGNORE_HOST_HEADERS= 1
.export NO_META_IGNORE_HOST_HEADERS
.endif # defined(_meta_mode_need_rebuild)
.endif # defined(OBJDIR_HOST_OSRELDATE)
.endif # ${MK_META_MODE} == "yes" && ${MK_CLEAN} == "no" ...
# This is only used for META_MODE+filemon to track what the oldest
# __FreeBSD_version is in WORLDTMP. This purposely does NOT have
# a make dependency on /usr/include/osreldate.h as the file should
# only be copied when it is missing or meta mode determines it has changed.
# Since host files are normally ignored without NO_META_IGNORE_HOST
# the file will never be updated unless that flag is specified. This
# allows tracking the oldest osreldate to force rebuilds via
# META_MODE_BAD_ABI_VERS above.
host-osreldate.h: # DO NOT ADD /usr/include/osreldate.h here
.if !defined(CROSSBUILD_HOST)
@cp -f /usr/include/osreldate.h ${.TARGET}
.else
@echo "#ifndef __FreeBSD_version" > ${.TARGET}
@echo "#define __FreeBSD_version ${OSRELDATE}" >> ${.TARGET}
@echo "#endif" >> ${.TARGET}
.endif
WMAKE= ${TIME_ENV} ${WMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \
BWPHASE=${.TARGET:C,^_,,} \
DESTDIR=${WORLDTMP}
IMAKEENV= ${CROSSENV}
IMAKE= ${TIME_ENV} ${IMAKEENV} ${MAKE} -f Makefile.inc1 \
${IMAKE_INSTALL} ${IMAKE_MTREE}
.if empty(.MAKEFLAGS:M-n)
IMAKEENV+= PATH=${STRICTTMPPATH:Q}:${INSTALLTMP:Q} \
LD_LIBRARY_PATH=${INSTALLTMP:Q} \
PATH_LOCALE=${INSTALLTMP}/locale
IMAKE+= __MAKE_SHELL=${INSTALLTMP}/sh
.else
IMAKEENV+= PATH=${TMPPATH:Q}:${INSTALLTMP:Q}
.endif
# When generating install media, do not allow user and group information from
# the build host to affect the contents of the distribution.
.if make(distributeworld) || make(distrib-dirs) || make(distribution) || \
make(stageworld)
DB_FROM_SRC= yes
.endif
.if defined(DB_FROM_SRC)
INSTALLFLAGS+= -N ${.CURDIR}/etc
MTREEFLAGS+= -N ${.CURDIR}/etc
.endif
_INSTALL_DDIR= ${DESTDIR}/${DISTDIR}
INSTALL_DDIR= ${_INSTALL_DDIR:S://:/:g:C:/$::}
.if defined(NO_ROOT)
METALOG?= ${DESTDIR}/${DISTDIR}/METALOG
METALOG:= ${METALOG:C,//+,/,g}
IMAKE+= -DNO_ROOT METALOG=${METALOG}
METALOG_INSTALLFLAGS= -U -M ${METALOG} -D ${INSTALL_DDIR}
INSTALLFLAGS+= ${METALOG_INSTALLFLAGS}
CERTCTLFLAGS= ${METALOG_INSTALLFLAGS}
MTREEFLAGS+= -W
.endif
.if defined(BUILD_PKGS)
INSTALLFLAGS+= -h sha256
.endif
.if defined(DB_FROM_SRC) || defined(NO_ROOT)
IMAKE_INSTALL= INSTALL="${INSTALL_CMD} ${INSTALLFLAGS}"
IMAKE_MTREE= MTREE_CMD="${MTREE_CMD} ${MTREEFLAGS}"
.endif
.if make(distributeworld)
CERTCTLDESTDIR= ${DESTDIR}/${DISTDIR}
CERTCTLFLAGS+= -d /base
.else
CERTCTLDESTDIR= ${DESTDIR}
.endif
CERTCTLFLAGS+= -D "${CERTCTLDESTDIR}"
DESTDIR_MTREEFLAGS= -deU
# When creating worldtmp we don't need to set the directories as owned by root
# so we also pass -W
WORLDTMP_MTREEFLAGS= -deUW
.if defined(NO_ROOT)
# When building with -DNO_ROOT we shouldn't be changing the directories
# that are created by mtree to be owned by root/wheel.
DESTDIR_MTREEFLAGS+= -W
.endif
.if defined(DB_FROM_SRC)
DISTR_MTREEFLAGS= -N ${.CURDIR}/etc
.endif
DISTR_MTREECMD= ${MTREE_CMD}
.if ${BUILD_WITH_STRICT_TMPPATH} != 0
DISTR_MTREECMD= ${WORLDTMP}/legacy/usr/sbin/mtree
.endif
DISTR_MTREE= ${DISTR_MTREECMD} ${DISTR_MTREEFLAGS}
WORLDTMP_MTREE= ${DISTR_MTREECMD} ${WORLDTMP_MTREEFLAGS}
DESTDIR_MTREE= ${DISTR_MTREECMD} ${DESTDIR_MTREEFLAGS}
METALOG_SORT_CMD= env -i LC_COLLATE=C sort
# kernel stage
KMAKEENV= ${WMAKEENV:NSYSROOT=*}
KMAKE= ${TIME_ENV} ${KMAKEENV} ${MAKE} ${.MAKEFLAGS} ${KERNEL_FLAGS} KERNEL=${INSTKERNNAME}
#
# buildworld
#
# Attempt to rebuild the entire system, with reasonable chance of
# success, regardless of how old your existing system is.
#
_sanity_check: .PHONY .MAKE
.if ${.CURDIR:C/[^,]//g} != ""
# The m4 build of sendmail files doesn't like it if ',' is used
# anywhere in the path of its files.
@echo
@echo "*** Error: path to source tree contains a comma ','"
@echo
@false
.elif ${.CURDIR:M*\:*} != ""
# Using ':' leaks into PATH and breaks finding cross-tools.
@echo
@echo "*** Error: path to source tree contains a colon ':'"
@echo
@false
.endif
# Our current approach to dependency tracking cannot cope with certain source
# tree changes, particularly with respect to removing source files and
# replacing generated files. Handle these cases here in an ad-hoc fashion.
_cleanobj_fast_depend_hack: .PHONY
@echo ">>> Deleting stale dependencies...";
MACHINE=${MACHINE} MACHINE_ARCH=${MACHINE_ARCH} \
ALL_libcompats=${_ALL_libcompats:Q} \
sh ${.CURDIR}/tools/build/depend-cleanup.sh ${OBJTOP}
_cleanworldtmp: .PHONY
.if ${MK_CLEAN} == "yes"
@echo
@echo "--------------------------------------------------------------"
@echo ">>> Cleaning up the temporary build tree"
@echo "--------------------------------------------------------------"
rm -rf ${WORLDTMP}
.else
# Note: for delete-old we need to set $PATH to also include the host $PATH
# since otherwise a partial build with missing symlinks in ${WORLDTMP}/legacy/
# will fail to run due to missing binaries. $WMAKE sets PATH to only ${TMPPATH}
# so we remove that assignment from $WMAKE and prepend the new $PATH.
${_+_}@if [ -e "${WORLDTMP}" ]; then \
echo ">>> Deleting stale files in build tree..."; \
cd ${.CURDIR}; env PATH=${TMPPATH:Q}:${PATH:Q} ${WMAKE:NPATH=*} \
_NO_INCLUDE_COMPILERMK=t -DBATCH_DELETE_OLD_FILES delete-old \
delete-old-libs >/dev/null; \
fi
rm -rf ${WORLDTMP}/legacy/usr/include
.if ${USING_SYSTEM_COMPILER} == "yes"
.for cc in cc c++
if [ -x ${WORLDTMP}/usr/bin/${cc} ]; then \
inum=$$(stat -f %i ${WORLDTMP}/usr/bin/${cc}); \
find ${WORLDTMP}/usr/bin -inum $${inum} -delete; \
fi
.endfor
.endif # ${USING_SYSTEM_COMPILER} == "yes"
.if ${USING_SYSTEM_LINKER} == "yes"
@rm -f ${WORLDTMP}/usr/bin/ld ${WORLDTMP}/usr/bin/ld.lld
.endif # ${USING_SYSTEM_LINKER} == "yes"
.endif # ${MK_CLEAN} == "yes"
_worldtmp: .PHONY
@echo
@echo "--------------------------------------------------------------"
@echo ">>> Rebuilding the temporary build tree"
@echo "--------------------------------------------------------------"
@mkdir -p ${WORLDTMP}
@touch ${WORLDTMP}/${.TARGET}
# We can't use mtree to create the worldtmp directories since it may not be
# available on the host system (this happens e.g. when building on non-FreeBSD)
${_+_}cd ${.CURDIR}/tools/build; \
${MAKE} DIRPRFX=tools/build/ DESTDIR=${WORLDTMP}/legacy installdirs
# In order to build without inheriting $PATH we need to add symlinks to the host
# tools in $WORLDTMP for the tools that we don't build during bootstrap-tools.
${_+_}cd ${.CURDIR}/tools/build; \
${MAKE} DIRPRFX=tools/build/ DESTDIR=${WORLDTMP}/legacy host-symlinks
_legacy:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 1.1: legacy release compatibility shims"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${BMAKE} legacy
_bootstrap-tools:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 1.2: bootstrap tools"
@echo "--------------------------------------------------------------"
.if ${MK_CLEAN} != "yes"
${_+_}cd ${.CURDIR}; ${BMAKE} _NO_INCLUDE_COMPILERMK=t _cleanobj_fast_depend_hack
.endif
${_+_}cd ${.CURDIR}; ${BMAKE} bootstrap-tools
mkdir -p ${WORLDTMP}/usr ${WORLDTMP}/lib/geom ${WORLDTMP}/bin
${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \
-p ${WORLDTMP}/usr >/dev/null
${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \
-p ${WORLDTMP}/usr/include >/dev/null
.for d in ${LIBCOMPAT_INCLUDE_DIRS}
mkdir -p ${WORLDTMP}/usr/include/${d}
.endfor
ln -sf ${.CURDIR}/sys ${WORLDTMP}
.if ${MK_DEBUG_FILES} != "no"
${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.debug.dist \
-p ${WORLDTMP}/usr/lib >/dev/null
.endif
.for _mtree in ${LOCAL_MTREE}
${WORLDTMP_MTREE} -f ${.CURDIR}/${_mtree} -p ${WORLDTMP} > /dev/null
.endfor
_cleanobj:
.if ${MK_CLEAN} == "yes"
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 2.1: cleaning up the object tree"
@echo "--------------------------------------------------------------"
# Avoid including bsd.compiler.mk in clean and obj with _NO_INCLUDE_COMPILERMK
# since the restricted $PATH might not contain a valid cc binary
${_+_}cd ${.CURDIR}; ${WMAKE} _NO_INCLUDE_COMPILERMK=t ${CLEANDIR}
.for LIBCOMPAT in ${_LIBCOMPATS}
${_+_}cd ${.CURDIR}; ${LIB${LIBCOMPAT}WMAKE} _NO_INCLUDE_COMPILERMK=t -f Makefile.inc1 ${CLEANDIR}
.endfor
.else
${_+_}cd ${.CURDIR}; ${WMAKE} _NO_INCLUDE_COMPILERMK=t _cleanobj_fast_depend_hack
.endif # ${MK_CLEAN} == "yes"
_obj:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 2.2: rebuilding the object tree"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${WMAKE} _NO_INCLUDE_COMPILERMK=t obj
_build-tools:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 2.3: build tools"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${TMAKE} build-tools
_cross-tools:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 3: cross tools"
@echo "--------------------------------------------------------------"
@rm -f ${OBJTOP}/toolchain-metadata.mk
${_+_}cd ${.CURDIR}; ${XMAKE} cross-tools
${_+_}cd ${.CURDIR}; ${XMAKE} kernel-tools
_build-metadata:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 3.1: recording build metadata"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${WMAKE} toolchain-metadata.mk
${_+_}cd ${.CURDIR}; ${WMAKE} host-osreldate.h
_includes:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 4.1: building includes"
@echo "--------------------------------------------------------------"
# Special handling for SUBDIR_OVERRIDE in buildworld as they most likely need
# headers from default SUBDIR. Do SUBDIR_OVERRIDE includes last.
${_+_}cd ${.CURDIR}; ${WMAKE} SUBDIR_OVERRIDE= SHARED=symlinks \
MK_INCLUDES=yes includes
.if !empty(SUBDIR_OVERRIDE) && make(buildworld)
${_+_}cd ${.CURDIR}; ${WMAKE} MK_INCLUDES=yes SHARED=symlinks includes
.endif
${_+_}cd ${.CURDIR}; ${WMAKE} test-includes
_libraries:
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 4.2: building libraries"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; \
${WMAKE} -DNO_FSCHG MK_HTML=no -DNO_LINT MK_MAN=no \
MK_PROFILE=no MK_TESTS=no MK_TESTS_SUPPORT=${MK_TESTS_SUPPORT} \
libraries
everything: .PHONY
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 4.4: building everything"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; _PARALLEL_SUBDIR_OK=1 ${WMAKE} all
WMAKE_TGTS=
.if !defined(WORLDFAST)
WMAKE_TGTS+= _sanity_check _cleanworldtmp _worldtmp _legacy
.if empty(SUBDIR_OVERRIDE)
WMAKE_TGTS+= _bootstrap-tools
.endif
WMAKE_TGTS+= _cleanobj
.if !defined(NO_OBJWALK)
WMAKE_TGTS+= _obj
.endif
WMAKE_TGTS+= _build-tools _cross-tools
WMAKE_TGTS+= _build-metadata
WMAKE_TGTS+= _includes
.endif
.if !defined(NO_LIBS)
WMAKE_TGTS+= _libraries
.endif
.if empty(SUBDIR_OVERRIDE)
.for libcompat in ${libcompats}
WMAKE_TGTS+= build${libcompat}
.endfor
.endif
WMAKE_TGTS+= everything
# record buildworld time in seconds
.if make(buildworld)
_BUILDWORLD_START!= date '+%s'
.export _BUILDWORLD_START
.endif
buildworld: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue .PHONY
.ORDER: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue
_ncpu_cmd=sysctl -n hw.ncpu 2>/dev/null || nproc 2>/dev/null || echo unknown
buildworld_prologue: .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> World build started on `LC_ALL=C date`"
@echo "--------------------------------------------------------------"
buildworld_epilogue: .PHONY
@echo
@echo "--------------------------------------------------------------"
@echo ">>> World build completed on `LC_ALL=C date`"
@seconds=$$(($$(date '+%s') - ${_BUILDWORLD_START})); \
echo -n ">>> World built in $$seconds seconds, "; \
echo "ncpu: $$(${_ncpu_cmd})${.MAKE.JOBS:S/^/, make -j/}"
@echo "--------------------------------------------------------------"
#
# We need to have this as a target because the indirection between Makefile
# and Makefile.inc1 causes the correct PATH to be used, rather than a
# modification of the current environment's PATH. In addition, we need
# to quote multiword values.
#
buildenvvars: .PHONY
@echo ${WMAKEENV:Q} ${.MAKE.EXPORTED:@v@$v=\"${$v}\"@}
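# Example (a sketch of one plausible use; the tool invoked is an
# illustrative assumption):
#   env `make buildenvvars` cc --version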
.if ${.TARGETS:Mbuildenv}
.if ${.MAKEFLAGS:M-j}
.error The buildenv target is incompatible with -j
.endif
.endif
BUILDENV_DIR?= ${.CURDIR}
#
# Note: make will report any errors the shell reports. This can
# be odd if the last command in an interactive shell generates an
# error or is terminated by SIGINT. These reported errors look bad,
# but are harmless. Allowing them also allows BUILDENV_SHELL to
# be a complex command whose status will be returned to the caller.
# Some scripts in tools rely on this behavior to report build errors.
#
buildenv: .PHONY
@echo Entering world for ${TARGET_ARCH}:${TARGET}
.if ${BUILDENV_SHELL:M*zsh*}
@echo For ZSH you must run: export CPUTYPE=${TARGET_CPUTYPE}
.endif
@cd ${BUILDENV_DIR} && env ${WMAKEENV} \
INSTALL="${INSTALL_CMD} ${INSTALLFLAGS}" \
MTREE_CMD="${MTREE_CMD} ${MTREEFLAGS}" BUILDENV=1 ${BUILDENV_SHELL}
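# Example (a sketch; the target values and directory are illustrative
# assumptions):
#   make TARGET=arm64 TARGET_ARCH=aarch64 buildenv
# then, inside the spawned shell, e.g.:
#   cd bin/ls && make all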
TOOLCHAIN_TGTS= ${WMAKE_TGTS:Neverything:${libcompats:@v@Nbuild${v}@:ts:}}
toolchain: ${TOOLCHAIN_TGTS} .PHONY
KERNEL_TOOLCHAIN_TGTS= ${TOOLCHAIN_TGTS:N_obj:N_cleanobj:N_includes:N_libraries}
.if make(kernel-toolchain)
.ORDER: ${KERNEL_TOOLCHAIN_TGTS}
.endif
kernel-toolchain: ${KERNEL_TOOLCHAIN_TGTS} .PHONY
#
# installcheck
#
# Checks to be sure system is ready for installworld/installkernel.
#
installcheck: _installcheck_world _installcheck_kernel .PHONY
_installcheck_world: .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Install check world"
@echo "--------------------------------------------------------------"
_installcheck_kernel: .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Install check kernel"
@echo "--------------------------------------------------------------"
#
# Require DESTDIR to be set if installing for a different architecture or
# using the user/group database in the source tree.
#
.if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${TARGET} != ${MACHINE} || \
defined(DB_FROM_SRC)
.if !make(distributeworld)
_installcheck_world: __installcheck_DESTDIR
_installcheck_kernel: __installcheck_DESTDIR
__installcheck_DESTDIR: .PHONY
.if !defined(DESTDIR) || empty(DESTDIR)
@echo "ERROR: Please set DESTDIR!"; \
false
.endif
.endif
.endif
.if !defined(DB_FROM_SRC)
#
# Check for missing UIDs/GIDs.
#
CHECK_UIDS= auditdistd
CHECK_GIDS= audit
CHECK_UIDS+= ntpd
CHECK_GIDS+= ntpd
CHECK_UIDS+= proxy
CHECK_GIDS+= proxy authpf
CHECK_UIDS+= smmsp
CHECK_GIDS+= smmsp
CHECK_UIDS+= unbound
CHECK_GIDS+= unbound
_installcheck_world: __installcheck_UGID
__installcheck_UGID: .PHONY
.for uid in ${CHECK_UIDS}
@if ! `id -u ${uid} >/dev/null 2>&1`; then \
echo "ERROR: Required ${uid} user is missing, see /usr/src/UPDATING."; \
false; \
fi
.endfor
.for gid in ${CHECK_GIDS}
@if ! `find / -prune -group ${gid} >/dev/null 2>&1`; then \
echo "ERROR: Required ${gid} group is missing, see /usr/src/UPDATING."; \
false; \
fi
.endfor
.endif
#
# If installing over the running system (DESTDIR is / or unset) and the install
# includes rescue, try running rescue from the objdir as a sanity check. If
# rescue is not functional (e.g., because it depends on a system call not
# supported by the currently running kernel), abort the installation.
#
.if !make(distributeworld) && ${MK_RESCUE} != "no" && \
(empty(DESTDIR) || ${DESTDIR} == "/") && empty(BYPASS_INSTALLCHECK_SH)
_installcheck_world: __installcheck_sh_check
__installcheck_sh_check: .PHONY
@if [ "`${OBJTOP}/rescue/rescue/rescue sh -c 'echo OK'`" != \
OK ]; then \
echo "rescue/sh check failed, installation aborted" >&2; \
false; \
fi
.endif
#
# Required install tools to be saved in a scratch dir for safety.
#
.if !defined(CROSSBUILD_HOST)
_sysctl=sysctl
.endif
ITOOLS= [ awk cap_mkdb cat chflags chmod chown cmp cp \
date echo egrep find grep id install ${_install-info} \
ln make mkdir mtree mv pwd_mkdb \
rm sed services_mkdb sh sort strip ${_sysctl} test time true uname wc
.if ${MK_ZONEINFO} != "no"
ITOOLS+=tzsetup
.endif
# Needed for share/man
.if ${MK_MAN_UTILS} != "no"
ITOOLS+=makewhatis
.endif
ITOOLS+=${LOCAL_ITOOLS}
#
# distributeworld
#
# Distributes everything compiled by a `buildworld'.
#
# installworld
#
# Installs everything compiled by a 'buildworld'.
#
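# Example invocations (a sketch; the DESTDIR/DISTDIR values are
# illustrative assumptions):
#   make installworld DESTDIR=/mnt
#   make distributeworld -DNO_ROOT DESTDIR=/tmp/dist DISTDIR=dist
#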
# Non-base distributions produced by the base system
EXTRA_DISTRIBUTIONS=
.for libcompat in ${libcompats}
EXTRA_DISTRIBUTIONS+= lib${libcompat}
.endfor
.if ${MK_TESTS} != "no"
EXTRA_DISTRIBUTIONS+= tests
.endif
DEBUG_DISTRIBUTIONS=
.if ${MK_DEBUG_FILES} != "no"
DEBUG_DISTRIBUTIONS+= base ${EXTRA_DISTRIBUTIONS:S,tests,,}
.endif
MTREE_MAGIC?= mtree 2.0
distributeworld installworld stageworld: _installcheck_world .PHONY
mkdir -p ${INSTALLTMP}
progs=$$(for prog in ${ITOOLS}; do \
if progpath=`env PATH=${TMPPATH:Q} which $$prog`; then \
echo $$progpath; \
else \
echo "Required tool $$prog not found in PATH ("${TMPPATH:Q}")." >&2; \
exit 1; \
fi; \
done); \
if [ -z "${CROSSBUILD_HOST}" ] ; then \
libs=$$(ldd -f "%o %p\n" -f "%o %p\n" $$progs 2>/dev/null | sort -u | grep -Ev '\[.*]' | \
while read line; do \
set -- $$line; \
if [ "$$2 $$3" != "not found" ]; then \
echo $$2; \
else \
echo "Required library $$1 not found." >&2; \
exit 1; \
fi; \
done); \
fi; \
${INSTALLTMP_COPY_HOST_TOOL} $$libs $$progs ${INSTALLTMP}
cp -R $${PATH_LOCALE:-"/usr/share/locale"} ${INSTALLTMP}/locale
.if defined(NO_ROOT)
-mkdir -p ${METALOG:H}
echo "#${MTREE_MAGIC}" > ${METALOG}
.endif
.if make(distributeworld)
.for dist in ${EXTRA_DISTRIBUTIONS}
-mkdir ${DESTDIR}/${DISTDIR}/${dist}
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.root.dist \
-p ${DESTDIR}/${DISTDIR}/${dist} >/dev/null
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}/usr/include >/dev/null
.for d in ${LIBCOMPAT_INCLUDE_DIRS}
-mkdir ${DESTDIR}/${DISTDIR}/${dist}/usr/include/${d}
.endfor
.if ${MK_DEBUG_FILES} != "no"
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.debug.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib >/dev/null
.endif
.for libcompat in ${libcompats}
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null
.if ${MK_DEBUG_FILES} != "no"
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/usr >/dev/null
.endif
.endfor
.if ${MK_TESTS} != "no" && ${dist} == "tests"
-mkdir -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE}
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.tests.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} >/dev/null
.if ${MK_DEBUG_FILES} != "no"
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.tests.dist \
-p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/${TESTSBASE} >/dev/null
.endif
.endif
.if defined(NO_ROOT)
${IMAKEENV} ${DISTR_MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.root.dist | \
sed -e 's#^\./#./${dist}/#' >> ${METALOG}
${IMAKEENV} ${DISTR_MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.usr.dist | \
sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG}
${IMAKEENV} ${DISTR_MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.include.dist | \
sed -e 's#^\./#./${dist}/usr/include/#' >> ${METALOG}
.for d in ${LIBCOMPAT_INCLUDE_DIRS}
echo "./${dist}/usr/include/${d} type=dir uname=root gname=wheel mode=0755" >> ${METALOG}
.endfor
.for libcompat in ${libcompats}
${IMAKEENV} ${DISTR_MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist | \
sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG}
.endfor
.endif
.endfor
-mkdir ${DESTDIR}/${DISTDIR}/base
${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH:Q} ${MAKE} \
METALOG=${METALOG} ${IMAKE_INSTALL} ${IMAKE_MTREE} \
DISTBASE=/base DESTDIR=${INSTALL_DDIR}/base \
LOCAL_MTREE=${LOCAL_MTREE:Q} distrib-dirs
${INSTALL_SYMLINK} ${INSTALLFLAGS} usr/src/sys ${INSTALL_DDIR}/base/sys
.endif # make(distributeworld)
${_+_}cd ${.CURDIR}; ${IMAKE} re${.TARGET:S/world$//}; \
${IMAKEENV} rm -rf ${INSTALLTMP}
.if !make(packageworld) && ${MK_CAROOT} != "no"
@if which openssl>/dev/null; then \
PATH=${TMPPATH:Q}:${PATH:Q} \
sh ${SRCTOP}/usr.sbin/certctl/certctl.sh ${CERTCTLFLAGS} rehash; \
else \
echo "No openssl on the host, not rehashing certificates target -- /etc/ssl may not be populated."; \
fi
.endif
.if make(distributeworld)
.for dist in ${EXTRA_DISTRIBUTIONS}
find ${DESTDIR}/${DISTDIR}/${dist} -mindepth 1 -type d -empty -delete
.endfor
.if defined(NO_ROOT)
.for dist in base ${EXTRA_DISTRIBUTIONS}
@# For each file that exists in this dist, print the corresponding
@# line from the METALOG. This relies on the fact that
@# a line containing only the filename will sort immediately before
@# the relevant mtree line.
cd ${DESTDIR}/${DISTDIR}; \
find ./${dist} | ${METALOG_SORT_CMD} -u ${METALOG} - | \
awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}/, "."); print } }' > \
${DESTDIR}/${DISTDIR}/${dist}.meta
.endfor
.for dist in ${DEBUG_DISTRIBUTIONS}
@# For each file that exists in this dist, print the corresponding
@# line from the METALOG. This relies on the fact that
@# a line containing only the filename will sort immediately before
@# the relevant mtree line.
cd ${DESTDIR}/${DISTDIR}; \
find ./${dist}/usr/lib/debug | ${METALOG_SORT_CMD} -u ${METALOG} - | \
awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}/, "."); print } }' > \
${DESTDIR}/${DISTDIR}/${dist}.debug.meta
.endfor
.endif
.endif # make(distributeworld)
packageworld: .PHONY
.for dist in base ${EXTRA_DISTRIBUTIONS}
.if defined(NO_ROOT)
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
${TAR_CMD} cvf - --exclude usr/lib/debug \
@${DESTDIR}/${DISTDIR}/${dist}.meta | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz
.else
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
${TAR_CMD} cvf - --exclude usr/lib/debug . | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz
.endif
.endfor
.for dist in ${DEBUG_DISTRIBUTIONS}
. if defined(NO_ROOT)
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
${TAR_CMD} cvf - @${DESTDIR}/${DISTDIR}/${dist}.debug.meta | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz
. else
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
${TAR_CMD} cvLf - usr/lib/debug | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz
. endif
.endfor
makeman: .PHONY
${_+_}cd ${.CURDIR}/tools/build/options; sh makeman > \
${.CURDIR}/share/man/man5/src.conf.5
# Ensure no regressions in self-includeability of sys/*.h and net*/*.h
test-includes: .PHONY
${_+_}cd ${.CURDIR}/tools/build/test-includes; \
${WMAKEENV} ${MAKE} ${WORLD_FLAGS} DESTDIR=${WORLDTMP} test-includes
# We can't assume here that ${TMPPATH} will include ${PATH} or /usr/libexec
# because we may be building with a STRICTTMPPATH, so we explicitly include
# /usr/libexec here for flua. ${TMPPATH} still usefully includes anything else
# we may need to function.
_sysent_PATH= ${TMPPATH}:/usr/libexec
_sysent_dirs= sys/kern
_sysent_dirs+= sys/compat/freebsd32
_sysent_dirs+= sys/amd64/linux \
sys/amd64/linux32 \
sys/arm64/linux \
sys/i386/linux
sysent: .PHONY
.for _dir in ${_sysent_dirs}
sysent-${_dir}: .PHONY
@echo "${MAKE} -C ${.CURDIR}/${_dir} sysent"
${_+_}@env PATH=${_sysent_PATH:Q} ${MAKE} -C ${.CURDIR}/${_dir} sysent
sysent: sysent-${_dir}
.endfor
#
# reinstall
#
# If you have a build server, you can NFS mount the source and obj directories
# and do a 'make reinstall' on the *client* to install new binaries from the
# most recent server build.
#
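# Example (a sketch; the server name and mount points are illustrative
# assumptions):
#   client# mount server:/usr/src /usr/src
#   client# mount server:/usr/obj /usr/obj
#   client# cd /usr/src && make reinstall
#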
restage reinstall: .MAKE .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Making hierarchy"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \
LOCAL_MTREE=${LOCAL_MTREE:Q} hierarchy
.if make(restage)
@echo "--------------------------------------------------------------"
@echo ">>> Making distribution"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \
LOCAL_MTREE=${LOCAL_MTREE:Q} distribution
.endif
@echo
@echo "--------------------------------------------------------------"
@echo ">>> Installing everything started on `LC_ALL=C date`"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install
.for libcompat in ${libcompats}
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install${libcompat}
.endfor
@echo "--------------------------------------------------------------"
@echo ">>> Installing everything completed on `LC_ALL=C date`"
@echo "--------------------------------------------------------------"
redistribute: .MAKE .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Distributing everything"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute
.for libcompat in ${libcompats}
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute${libcompat} \
DISTRIBUTION=lib${libcompat}
.endfor
distrib-dirs distribution: .MAKE .PHONY
${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH:Q} ${MAKE} \
${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET}
.if make(distribution)
${_+_}cd ${.CURDIR}; ${CROSSENV} PATH=${TMPPATH:Q} \
${MAKE} -f Makefile.inc1 ${IMAKE_INSTALL} \
METALOG=${METALOG} MK_TESTS=no \
MK_TESTS_SUPPORT=${MK_TESTS_SUPPORT} installconfig
.endif
#
# buildetc and installetc
#
buildetc: .MAKE .PHONY
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 _worldtmp
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 _legacy
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 _bootstrap-tools \
MK_CROSS_COMPILER=no MK_TOOLCHAIN=no
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 _obj \
SUBDIR_OVERRIDE=etc
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 everything \
SUBDIR_OVERRIDE=etc
installetc: .MAKE .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Making hierarchy"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distrib-dirs
@echo "--------------------------------------------------------------"
@echo ">>> Making distribution"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribution
#
# buildkernel and installkernel
#
# Which kernels to build and/or install is specified by setting
# KERNCONF. If not defined a GENERIC kernel is built/installed.
# Only the config files that exist (depending on TARGET) are used
# for building kernels and only the first of these is designated
# as the one being installed.
#
# Note that we have to use TARGET instead of TARGET_ARCH when
# we're in kernel-land. Since only TARGET_ARCH is expected to
# be set when cross-building, we have to make sure TARGET is set
# properly.
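#
# For example (MYKERNEL is an illustrative config name; the first kernel
# listed is the one installed by installkernel):
#   make buildkernel KERNCONF="MYKERNEL GENERIC"
#   make installkernel KERNCONF="MYKERNEL GENERIC"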
.if defined(KERNFAST)
NO_KERNELCLEAN= t
NO_KERNELCONFIG= t
NO_KERNELOBJ= t
# Shortcut for KERNCONF=Blah -DKERNFAST is now KERNFAST=Blah
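# For example, the following invocations are equivalent (MYKERNEL is an
# illustrative config name):
#   make buildkernel KERNCONF=MYKERNEL -DKERNFAST
#   make buildkernel KERNFAST=MYKERNEL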
.if !defined(KERNCONF) && ${KERNFAST} != "1"
KERNCONF=${KERNFAST}
.endif
.endif
.if ${TARGET_ARCH} == "powerpc64"
KERNCONF?= GENERIC64
.elif ${TARGET_ARCH} == "powerpc64le"
KERNCONF?= GENERIC64LE
.elif ${TARGET_ARCH} == "powerpcspe"
KERNCONF?= MPC85XXSPE
.else
KERNCONF?= GENERIC
.endif
INSTKERNNAME?= kernel
KERNSRCDIR?= ${.CURDIR}/sys
KRNLCONFDIR= ${KERNSRCDIR}/${TARGET}/conf
KRNLOBJDIR= ${OBJTOP}${KERNSRCDIR:C,^${.CURDIR},,}
KERNCONFDIR?= ${KRNLCONFDIR}
BUILDKERNELS=
INSTALLKERNEL=
.if defined(NO_INSTALLKERNEL)
# All of the BUILDKERNELS loops start at index 1.
BUILDKERNELS+= dummy
.endif
.for _kernel in ${KERNCONF}
.if !defined(_MKSHOWCONFIG) && exists(${KERNCONFDIR}/${_kernel})
BUILDKERNELS+= ${_kernel}
.if empty(INSTALLKERNEL) && !defined(NO_INSTALLKERNEL)
INSTALLKERNEL= ${_kernel}
.endif
.else
.if make(buildkernel)
.error Missing KERNCONF ${KERNCONFDIR}/${_kernel}
.endif
.endif
.endfor
_cleankernobj_fast_depend_hack: .PHONY
# 20191009 r353340 removal of opensolaris_atomic.S (also r353381)
.if ${MACHINE} != i386
.for f in opensolaris_atomic
.for m in opensolaris zfs
@if [ -e "${KRNLOBJDIR}/${KERNCONF}/modules${SRCTOP}/sys/modules/${m}/.depend.${f}.o" ] && \
grep -q ${f}.S "${KRNLOBJDIR}/${KERNCONF}/modules${SRCTOP}/sys/modules/${m}/.depend.${f}.o"; then \
echo "Removing stale dependencies for opensolaris_atomic"; \
rm -f ${KRNLOBJDIR}/${KERNCONF}/modules${SRCTOP}/sys/modules/${m}/.depend.${f}.*; \
fi
.endfor
.endfor
.endif
${WMAKE_TGTS:N_cleanworldtmp:N_worldtmp:${libcompats:@v@Nbuild${v}@:ts:}}: .MAKE .PHONY
${.ALLTARGETS:M_*:N_cleanworldtmp:N_worldtmp}: .MAKE .PHONY
# record kernel(s) build time in seconds
.if make(buildkernel)
_BUILDKERNEL_START!= date '+%s'
.endif
#
# buildkernel
#
# Builds all kernels defined by BUILDKERNELS.
#
buildkernel: .MAKE .PHONY
.if empty(BUILDKERNELS:Ndummy)
@echo "ERROR: Missing kernel configuration file(s) (${KERNCONF})."; \
false
.endif
@echo
.for _kernel in ${BUILDKERNELS:Ndummy}
@echo "--------------------------------------------------------------"
@echo ">>> Kernel build for ${_kernel} started on `LC_ALL=C date`"
@echo "--------------------------------------------------------------"
@echo "===> ${_kernel}"
mkdir -p ${KRNLOBJDIR}
.if !defined(NO_KERNELCONFIG)
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 1: configuring the kernel"
@echo "--------------------------------------------------------------"
cd ${KRNLCONFDIR}; \
PATH=${TMPPATH:Q} \
config ${CONFIGARGS} -d ${KRNLOBJDIR}/${_kernel} \
-I '${KERNCONFDIR}' -I '${KRNLCONFDIR}' \
'${KERNCONFDIR}/${_kernel}'
.endif
.if ${MK_CLEAN} == "yes" && !defined(NO_KERNELCLEAN)
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 2.1: cleaning up the object tree"
@echo "--------------------------------------------------------------"
${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} ${CLEANDIR}
.else
${_+_}cd ${.CURDIR}; ${WMAKE} _cleankernobj_fast_depend_hack
.endif
.if !defined(NO_KERNELOBJ)
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 2.2: rebuilding the object tree"
@echo "--------------------------------------------------------------"
${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} obj
.endif
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 2.3: build tools"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${KTMAKE} kernel-tools
@echo
@echo "--------------------------------------------------------------"
@echo ">>> stage 3.1: building everything"
@echo "--------------------------------------------------------------"
${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} all -DNO_MODULES_OBJ
@echo "--------------------------------------------------------------"
@echo ">>> Kernel build for ${_kernel} completed on `LC_ALL=C date`"
@echo "--------------------------------------------------------------"
.endfor
@seconds=$$(($$(date '+%s') - ${_BUILDKERNEL_START})); \
echo -n ">>> Kernel(s) ${BUILDKERNELS} built in $$seconds seconds, "; \
echo "ncpu: $$(${_ncpu_cmd})${.MAKE.JOBS:S/^/, make -j/}"
@echo "--------------------------------------------------------------"
.if !make(packages) && !make(update-packages)
NO_INSTALLEXTRAKERNELS?= yes
.else
# packages/update-packages install kernels to a staging directory and then
# build packages from the result, typically for installation on other systems.
# It is less surprising for these targets to honor KERNCONF if multiple
# kernels are specified.
NO_INSTALLEXTRAKERNELS?= no
.endif
#
# installkernel, etc.
#
# Install the kernel defined by INSTALLKERNEL
#
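# With NO_INSTALLEXTRAKERNELS=no, any additional kernels listed in KERNCONF
# are installed as ${INSTKERNNAME}.<config>, e.g. /boot/kernel.MYKERNEL
# (an illustrative config name).
#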
installkernel installkernel.debug \
reinstallkernel reinstallkernel.debug: _installcheck_kernel .PHONY
.if !defined(NO_INSTALLKERNEL)
.if empty(INSTALLKERNEL)
@echo "ERROR: No kernel \"${KERNCONF}\" to install."; \
false
.endif
@echo "--------------------------------------------------------------"
@echo ">>> Installing kernel ${INSTALLKERNEL} on $$(LC_ALL=C date)"
@echo "--------------------------------------------------------------"
${_+_}cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \
${CROSSENV} PATH=${TMPPATH:Q} \
${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME} ${.TARGET:S/kernel//}
@echo "--------------------------------------------------------------"
@echo ">>> Installing kernel ${INSTALLKERNEL} completed on $$(LC_ALL=C date)"
@echo "--------------------------------------------------------------"
.endif
.if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes"
.for _kernel in ${BUILDKERNELS:[2..-1]}
@echo "--------------------------------------------------------------"
@echo ">>> Installing kernel ${_kernel} $$(LC_ALL=C date)"
@echo "--------------------------------------------------------------"
${_+_}cd ${KRNLOBJDIR}/${_kernel}; \
${CROSSENV} PATH=${TMPPATH:Q} \
${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME}.${_kernel} ${.TARGET:S/kernel//}
@echo "--------------------------------------------------------------"
@echo ">>> Installing kernel ${_kernel} completed on $$(LC_ALL=C date)"
@echo "--------------------------------------------------------------"
.endfor
.endif
distributekernel distributekernel.debug: .PHONY
.if !defined(NO_INSTALLKERNEL)
.if empty(INSTALLKERNEL)
@echo "ERROR: No kernel \"${KERNCONF}\" to install."; \
false
.endif
mkdir -p ${DESTDIR}/${DISTDIR}
.if defined(NO_ROOT)
@echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.premeta
.endif
${_+_}cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \
${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.premeta/} \
${IMAKE_MTREE} PATH=${TMPPATH:Q} ${MAKE} KERNEL=${INSTKERNNAME} \
DESTDIR=${INSTALL_DDIR}/kernel \
${.TARGET:S/distributekernel/install/}
.if defined(NO_ROOT)
@sed -e 's|^./kernel|.|' ${DESTDIR}/${DISTDIR}/kernel.premeta > \
${DESTDIR}/${DISTDIR}/kernel.meta
.endif
.endif
.if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes"
.for _kernel in ${BUILDKERNELS:[2..-1]}
.if defined(NO_ROOT)
@echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta
.endif
${_+_}cd ${KRNLOBJDIR}/${_kernel}; \
${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.${_kernel}.premeta/} \
${IMAKE_MTREE} PATH=${TMPPATH:Q} ${MAKE} \
KERNEL=${INSTKERNNAME}.${_kernel} \
DESTDIR=${INSTALL_DDIR}/kernel.${_kernel} \
${.TARGET:S/distributekernel/install/}
.if defined(NO_ROOT)
@sed -e "s|^./kernel.${_kernel}|.|" \
${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta > \
${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta
.endif
.endfor
.endif
packagekernel: .PHONY
.if defined(NO_ROOT)
.if !defined(NO_INSTALLKERNEL)
cd ${DESTDIR}/${DISTDIR}/kernel; \
${TAR_CMD} cvf - --exclude '*.debug' \
@${DESTDIR}/${DISTDIR}/kernel.meta | \
${XZ_CMD} > ${PACKAGEDIR}/kernel.txz
.endif
.if ${MK_DEBUG_FILES} != "no"
cd ${DESTDIR}/${DISTDIR}/kernel; \
${TAR_CMD} cvf - --include '*/*/*.debug' \
@${DESTDIR}/${DISTDIR}/kernel.meta | \
${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz
.endif
.if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes"
.for _kernel in ${BUILDKERNELS:[2..-1]}
cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \
${TAR_CMD} cvf - --exclude '*.debug' \
@${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \
${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz
.if ${MK_DEBUG_FILES} != "no"
cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \
${TAR_CMD} cvf - --include '*/*/*.debug' \
@${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \
${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz
.endif
.endfor
.endif
.else
.if !defined(NO_INSTALLKERNEL)
cd ${DESTDIR}/${DISTDIR}/kernel; \
${TAR_CMD} cvf - --exclude '*.debug' . | \
${XZ_CMD} > ${PACKAGEDIR}/kernel.txz
.endif
.if ${MK_DEBUG_FILES} != "no"
cd ${DESTDIR}/${DISTDIR}/kernel; \
${TAR_CMD} cvf - --include '*/*/*.debug' $$(eval find .) | \
${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz
.endif
.if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes"
.for _kernel in ${BUILDKERNELS:[2..-1]}
cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \
${TAR_CMD} cvf - --exclude '*.debug' . | \
${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz
.if ${MK_DEBUG_FILES} != "no"
cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \
${TAR_CMD} cvf - --include '*/*/*.debug' $$(eval find .) | \
${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz
.endif
.endfor
.endif
.endif
stagekernel: .PHONY
${_+_}${MAKE} -C ${.CURDIR} ${.MAKEFLAGS} distributekernel
PORTSDIR?= /usr/ports
WSTAGEDIR?= ${OBJTOP}/worldstage
KSTAGEDIR?= ${OBJTOP}/kernelstage
REPODIR?= ${OBJROOT}repo
PKG_FORMAT?= txz
PKG_REPO_SIGNING_KEY?= # empty
PKG_OUTPUT_DIR?= ${PKG_VERSION}
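# For example, to build pkgbase packages into a custom repository location
# (illustrative path; REPODIR defaults to ${OBJROOT}repo):
#   make packages REPODIR=/tmp/repo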
.ORDER: stage-packages create-packages
.ORDER: create-packages create-world-packages
.ORDER: create-packages create-kernel-packages
.ORDER: create-packages sign-packages
_pkgbootstrap: .PHONY
.if make(*package*) && !exists(${LOCALBASE}/sbin/pkg)
@env ASSUME_ALWAYS_YES=YES pkg bootstrap
.endif
#
# Determine PKG_ABI from newvers.sh if not already set.
#
.if !defined(PKG_ABI) && (make(create-world-packages-jobs) || make(create-kernel-packages*) || make(real-update-packages) || make(sign-packages))
PKG_ABI=${_TYPE}:${MAJOR_REVISION}:${TARGET_ARCH}
.endif
PKG_BIN_VERSION!=${PKG_CMD} --version </dev/null 2>/dev/null |\
awk -F. '/^[0-9.]+$$/ {print $$1 * 10000 + $$2 * 100 + $$3}'
.if ${PKG_BIN_VERSION} < 11700
PKG_EXT= ${PKG_FORMAT}
.else
PKG_EXT= pkg
.endif
.if !defined(PKG_VERSION_FROM) && make(real-update-packages)
.if defined(PKG_ABI)
.if exists(${REPODIR}/${PKG_ABI})
PKG_VERSION_FROM!=/usr/bin/readlink ${REPODIR}/${PKG_ABI}/latest
PKG_VERSION_FROM_DIR= ${REPODIR}/${PKG_ABI}/${PKG_VERSION_FROM}
.else
PKG_VERSION_FROM=
PKG_VERSION_FROM_DIR=
.endif
.endif
.endif
PKGMAKEARGS+= PKG_VERSION=${PKG_VERSION} \
NO_INSTALLEXTRAKERNELS=${NO_INSTALLEXTRAKERNELS}
packages: .PHONY
${_+_}${MAKE} -C ${.CURDIR} ${PKGMAKEARGS} real-packages
update-packages: .PHONY
${_+_}${MAKE} -C ${.CURDIR} ${PKGMAKEARGS} real-update-packages
package-pkg: .PHONY
rm -rf /tmp/ports.${TARGET} || :
env ${WMAKEENV:Q} SRCDIR=${.CURDIR} PORTSDIR=${PORTSDIR} REVISION=${_REVISION} \
PKG_CMD=${PKG_CMD} PKG_VERSION=${PKG_VERSION} REPODIR=${REPODIR} \
WSTAGEDIR=${WSTAGEDIR} \
sh ${.CURDIR}/release/scripts/make-pkg-package.sh
real-packages: stage-packages create-packages sign-packages .PHONY
real-update-packages: stage-packages .PHONY
${_+_}${MAKE} -C ${.CURDIR} PKG_VERSION=${PKG_VERSION} create-packages
.if empty(PKG_VERSION_FROM_DIR)
@echo "==> Bootstrapping repository, not checking for new packages"
.else
@echo "==> Checking for new packages (comparing ${PKG_VERSION} to ${PKG_VERSION_FROM})"
@for pkg in ${PKG_VERSION_FROM_DIR}/${PKG_NAME_PREFIX}-*; do \
pkgname=$$(pkg query -F $${pkg} '%n' | sed 's/${PKG_NAME_PREFIX}-\(.*\)/\1/') ; \
newpkgname=${PKG_NAME_PREFIX}-$${pkgname}-${PKG_VERSION}.${PKG_EXT} ; \
oldsum=$$(pkg query -F $${pkg} '%X') ; \
if [ ! -f ${REPODIR}/${PKG_ABI}/${PKG_VERSION}/$${newpkgname} ]; then \
continue; \
fi ; \
newsum=$$(pkg query -F ${REPODIR}/${PKG_ABI}/${PKG_VERSION}/$${newpkgname} '%X') ; \
if [ "$${oldsum}" == "$${newsum}" ]; then \
echo "==> Keeping old ${PKG_NAME_PREFIX}-$${pkgname}-${PKG_VERSION_FROM}.${PKG_EXT}" ; \
rm ${REPODIR}/${PKG_ABI}/${PKG_VERSION}/$${newpkgname} ; \
cp $${pkg} ${REPODIR}/${PKG_ABI}/${PKG_VERSION} ; \
else \
echo "==> New package $${newpkgname}" ; \
fi ; \
done
.endif
${_+_}@cd ${.CURDIR}; \
${MAKE} -f Makefile.inc1 PKG_VERSION=${PKG_VERSION} sign-packages
stage-packages-world: .PHONY
@mkdir -p ${WSTAGEDIR}
${_+_}@cd ${.CURDIR}; \
${MAKE} DESTDIR=${WSTAGEDIR} -DNO_ROOT stageworld
stage-packages-kernel: .PHONY
@mkdir -p ${KSTAGEDIR}
${_+_}@cd ${.CURDIR}; \
${MAKE} DESTDIR=${KSTAGEDIR} -DNO_ROOT stagekernel
stage-packages: .PHONY stage-packages-world stage-packages-kernel
_repodir: .PHONY
@mkdir -p ${REPODIR}
create-packages-world: _pkgbootstrap _repodir .PHONY
${_+_}@cd ${.CURDIR}; \
${MAKE} -f Makefile.inc1 \
DESTDIR=${WSTAGEDIR} \
PKG_VERSION=${PKG_VERSION} create-world-packages
create-packages-kernel: _pkgbootstrap _repodir .PHONY
${_+_}@cd ${.CURDIR}; \
${MAKE} -f Makefile.inc1 \
DESTDIR=${KSTAGEDIR} \
PKG_VERSION=${PKG_VERSION} DISTDIR=kernel \
SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} \
create-kernel-packages
create-packages: .PHONY create-packages-world create-packages-kernel
create-world-packages: _pkgbootstrap .PHONY
@rm -f ${WSTAGEDIR}/*.plist 2>/dev/null || :
@cd ${WSTAGEDIR} ; \
${METALOG_SORT_CMD} ${WSTAGEDIR}/${DISTDIR}/METALOG | \
awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk
@for plist in ${WSTAGEDIR}/*.plist; do \
plist=$${plist##*/} ; \
pkgname=$${plist%.plist} ; \
echo "_PKGS+= $${pkgname}" ; \
done > ${WSTAGEDIR}/packages.mk
${_+_}@cd ${.CURDIR}; \
${MAKE} -f Makefile.inc1 create-world-packages-jobs \
SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} \
.MAKE.JOB.PREFIX=
.if make(create-world-packages-jobs)
.include "${WSTAGEDIR}/packages.mk"
.endif
create-world-packages-jobs: .PHONY
.for pkgname in ${_PKGS}
create-world-packages-jobs: create-world-package-${pkgname}
create-world-package-${pkgname}: .PHONY
@sh ${SRCDIR}/release/packages/generate-ucl.sh -o ${pkgname} \
-s ${SRCDIR} -u ${WSTAGEDIR}/${pkgname}.ucl
@awk -F\" ' \
/^name/ { printf("===> Creating %s-", $$2); next } \
/^version/ { print $$2; next } \
' ${WSTAGEDIR}/${pkgname}.ucl
@if [ "${pkgname}" == "runtime" ]; then \
sed -i '' -e "s/%VCS_REVISION%/${VCS_REVISION}/" ${WSTAGEDIR}/${pkgname}.ucl ; \
fi
${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname -o ALLOW_BASE_SHLIBS=yes \
create -f ${PKG_FORMAT} -M ${WSTAGEDIR}/${pkgname}.ucl \
-p ${WSTAGEDIR}/${pkgname}.plist \
-r ${WSTAGEDIR} \
-o ${REPODIR}/${PKG_ABI}/${PKG_OUTPUT_DIR}
.endfor
_default_flavor= -default
.if make(*package*) && exists(${KSTAGEDIR}/kernel.meta)
. if ${MK_DEBUG_FILES} != "no"
_debug=-dbg
. endif
create-kernel-packages: .PHONY
. for flavor in "" ${_debug}
create-kernel-packages: create-kernel-packages-flavor${flavor:C,^""$,${_default_flavor},}
create-kernel-packages-flavor${flavor:C,^""$,${_default_flavor},}: _pkgbootstrap .PHONY
@cd ${KSTAGEDIR}/${DISTDIR} ; \
${METALOG_SORT_CMD} ${KSTAGEDIR}/kernel.meta | \
awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \
-v kernel=yes -v _kernconf=${INSTALLKERNEL} ; \
sed -e "s/%VERSION%/${PKG_VERSION}/" \
-e "s/%PKGNAME%/kernel-${INSTALLKERNEL:tl}${flavor}/" \
-e "s/%KERNELDIR%/kernel/" \
-e "s/%COMMENT%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \
-e "s/%DESC%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \
-e "s/ %VCS_REVISION%/${VCS_REVISION}/" \
-e "s/%PKG_NAME_PREFIX%/${PKG_NAME_PREFIX}/" \
-e "s/%PKG_MAINTAINER%/${PKG_MAINTAINER}/" \
-e "s|%PKG_WWW%|${PKG_WWW}|" \
${SRCDIR}/release/packages/kernel.ucl \
> ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \
awk -F\" ' \
/name/ { printf("===> Creating %s-", $$2); next } \
/version/ {print $$2; next } ' \
${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \
${PKG_CMD} -o ABI=${PKG_ABI} -o ALLOW_BASE_SHLIBS=yes \
create -f ${PKG_FORMAT} \
-M ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl \
-p ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.plist \
-r ${KSTAGEDIR}/${DISTDIR} \
-o ${REPODIR}/${PKG_ABI}/${PKG_OUTPUT_DIR}
. endfor
.endif
.if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes"
. for _kernel in ${BUILDKERNELS:[2..-1]}
. if exists(${KSTAGEDIR}/kernel.${_kernel}.meta)
. if ${MK_DEBUG_FILES} != "no"
_debug=-dbg
. endif
. for flavor in "" ${_debug}
create-kernel-packages: create-kernel-packages-extra-flavor${flavor:C,^""$,${_default_flavor},}-${_kernel}
create-kernel-packages-extra-flavor${flavor:C,^""$,${_default_flavor},}-${_kernel}: _pkgbootstrap .PHONY
@cd ${KSTAGEDIR}/kernel.${_kernel} ; \
${METALOG_SORT_CMD} ${KSTAGEDIR}/kernel.${_kernel}.meta | \
awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \
-v kernel=yes -v _kernconf=${_kernel} ; \
sed -e "s/%VERSION%/${PKG_VERSION}/" \
-e "s/%PKGNAME%/kernel-${_kernel:tl}${flavor}/" \
-e "s/%KERNELDIR%/kernel.${_kernel}/" \
-e "s/%COMMENT%/FreeBSD ${_kernel} kernel ${flavor}/" \
-e "s/%DESC%/FreeBSD ${_kernel} kernel ${flavor}/" \
-e "s/ %VCS_REVISION%/${VCS_REVISION}/" \
-e "s/%PKG_NAME_PREFIX%/${PKG_NAME_PREFIX}/" \
-e "s/%PKG_MAINTAINER%/${PKG_MAINTAINER}/" \
-e "s|%PKG_WWW%|${PKG_WWW}|" \
${SRCDIR}/release/packages/kernel.ucl \
> ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \
awk -F\" ' \
/name/ { printf("===> Creating %s-", $$2); next } \
/version/ {print $$2; next } ' \
${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \
${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname -o ALLOW_BASE_SHLIBS=yes \
create -f ${PKG_FORMAT} \
-M ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl \
-p ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.plist \
-r ${KSTAGEDIR}/kernel.${_kernel} \
-o ${REPODIR}/${PKG_ABI}/${PKG_OUTPUT_DIR}
. endfor
. endif
. endfor
.endif
sign-packages: _pkgbootstrap .PHONY
printf "version = 2;\n" > ${WSTAGEDIR}/meta
.if ${PKG_BIN_VERSION} < 11700
printf "packing_format = \"${PKG_FORMAT}\";\n" >> ${WSTAGEDIR}/meta
.endif
@[ -L "${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname config ABI)/latest" ] && \
unlink ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname config ABI)/latest ; \
${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname repo \
-m ${WSTAGEDIR}/meta \
-o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname config ABI)/${PKG_VERSION} \
${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname config ABI)/${PKG_VERSION} \
${PKG_REPO_SIGNING_KEY} ; \
cd ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/usr/bin/uname config ABI); \
ln -s ${PKG_OUTPUT_DIR} latest
#
#
# checkworld
#
# Run test suite on installed world.
#
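# kyua is available from the devel/kyua port, e.g.:
#   pkg install devel/kyua
#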
checkworld: .PHONY
@if [ ! -x "${LOCALBASE}/bin/kyua" ] && [ ! -x "/usr/bin/kyua" ]; then \
echo "You need kyua (devel/kyua) to run the test suite." | /usr/bin/fmt; \
exit 1; \
fi
${_+_}PATH="$$PATH:${LOCALBASE}/bin" kyua test -k ${TESTSBASE}/Kyuafile
#
#
# doxygen
#
# Build the API documentation with doxygen
#
doxygen: .PHONY
@if [ ! -x "${LOCALBASE}/bin/doxygen" ]; then \
echo "You need doxygen (devel/doxygen) to generate the API documentation of the kernel." | /usr/bin/fmt; \
exit 1; \
fi
${_+_}cd ${.CURDIR}/tools/kerneldoc/subsys; ${MAKE} obj all
#
# ------------------------------------------------------------------------
#
# From here onwards are utility targets used by the 'make world' and
# related targets. If your 'world' breaks, you may like to try to fix
# the problem and manually run the following targets to attempt to
# complete the build. Beware, this is *not* guaranteed to work, you
# need to have a pretty good grip on the current state of the system
# to attempt to manually finish it. If in doubt, 'make world' again.
#
#
# legacy: Build compatibility shims for the next three targets. This is a
# minimal set of tools and shims necessary to compensate for older systems
# which don't have the APIs required by the targets built in bootstrap-tools,
# build-tools or cross-tools.
#
legacy: .PHONY
.if ${BOOTSTRAPPING} < ${MINIMUM_SUPPORTED_OSREL} && ${BOOTSTRAPPING} != 0
@echo "ERROR: Source upgrades from versions prior to ${MINIMUM_SUPPORTED_REL} are not supported."; \
false
.endif
.for _tool in \
tools/build \
${LOCAL_LEGACY_DIRS}
${_+_}@${ECHODIR} "===> ${_tool} (obj,includes,all,install)"; \
cd ${.CURDIR}/${_tool}; \
if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \
${MAKE} DIRPRFX=${_tool}/ DESTDIR=${WORLDTMP}/legacy includes; \
${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no all; \
${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no \
DESTDIR=${WORLDTMP}/legacy install
.endfor
#
# bootstrap-tools: Build tools needed for compatibility. These are binaries that
# are built to build other binaries in the system. However, the focus of these
# binaries is usually quite narrow. Bootstrap tools use the host's compiler and
# libraries, augmented by the -legacy shims, in addition to the libraries
# built during bootstrap-tools.
#
_bt= _bootstrap-tools
# We want to run the build with only ${WORLDTMP} in $PATH to ensure we don't
# accidentally run tools that are incompatible but happen to be in $PATH.
# This is especially important when building on Linux/macOS, where many of the
# programs used during the build accept different flags or generate different
# output. On those platforms we only symlink the tools known to be compatible
# (e.g. basic utilities such as mkdir) into ${WORLDTMP} and build all others
# from the FreeBSD sources during the bootstrap-tools stage.
# We want to build without the user's $PATH starting in the bootstrap-tools
# phase so the tools used in that phase (ln, cp, etc) must have already been
# linked to $WORLDTMP. The tools are listed in the _host_tools_to_symlink
# variable in tools/build/Makefile and are linked during the legacy phase.
# Since they could be Linux or macOS binaries too, we must only use flags that
# are portable across operating systems.
# If BOOTSTRAP_ALL_TOOLS is set we will build all the required tools from the
# current source tree. Otherwise we create a symlink to the version found in
# $PATH during the bootstrap-tools stage.
# When building on non-FreeBSD systems we can't assume that the host binaries
# accept compatible flags or produce compatible output. Therefore we force
# BOOTSTRAP_ALL_TOOLS and just build the FreeBSD version of the binary.
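# For example, a fully bootstrapped tool set (normally only implied by
# CROSSBUILD_HOST) can be forced on a FreeBSD host with:
#   make buildworld -DBOOTSTRAP_ALL_TOOLS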
.if defined(CROSSBUILD_HOST)
BOOTSTRAP_ALL_TOOLS:= 1
.endif
.if defined(BOOTSTRAP_ALL_TOOLS)
# BOOTSTRAPPING will be set on the command line so we can't override it here.
# Instead set BOOTSTRAPPING_OSRELDATE so that the value 0 is set in ${BSARGS}.
BOOTSTRAPPING_OSRELDATE:= 0
.endif
.if ${MK_GAMES} != "no"
_strfile= usr.bin/fortune/strfile
.endif
# vtfontcvt is used to build font files for the loader and to generate
# C source for the loader's built-in font (8x16.c).
_vtfontcvt= usr.bin/vtfontcvt
# zic is used to compile timezone data
.if ${MK_ZONEINFO} != "no"
_zic= usr.sbin/zic
.endif
# If we are not building a bootstrap tool because BOOTSTRAPPING is sufficient,
# we symlink the host version into $WORLDTMP instead. By doing this we can also
# detect when a bootstrap tool is being used without the required MK_FOO.
# If you add a new bootstrap tool where we could also use the host version,
# please ensure that you also add a .else case where you add the tool to the
# _bootstrap_tools_links variable.
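# A new entry typically follows this pattern (usr.bin/foo is illustrative):
#   .if ${BOOTSTRAPPING} < <first __FreeBSD_version providing a usable foo>
#   _foo= usr.bin/foo
#   .else
#   _bootstrap_tools_links+=foo
#   .endif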
.if ${BOOTSTRAPPING} < 1000033
# Note: lex needs m4 to build but m4 also depends on lex (which needs m4 to
# generate any files). To fix this cyclic dependency we can build a bootstrap
# version of m4 (with pre-generated files) then use that to build the real m4.
# We can't simply use the host m4 since e.g. the macOS version does not accept
# the flags that are passed by lex.
# For lex we also use the pre-generated files since we would otherwise need to
# build awk and sed first (which need lex to build).
# TODO: add a _bootstrap_lex and then build the real lex afterwards
_lex= usr.bin/lex
_m4= tools/build/bootstrap-m4 usr.bin/m4
${_bt}-tools/build/bootstrap-m4: ${_bt}-usr.bin/lex ${_bt}-lib/libopenbsd ${_bt}-usr.bin/yacc
${_bt}-usr.bin/m4: ${_bt}-lib/libopenbsd ${_bt}-usr.bin/yacc ${_bt}-usr.bin/lex ${_bt}-tools/build/bootstrap-m4
_bt_m4_depend=${_bt}-usr.bin/m4
_bt_lex_depend=${_bt}-usr.bin/lex ${_bt_m4_depend}
.else
_bootstrap_tools_links+=m4 lex
.endif
# ELF Tool Chain libraries are needed for ELF tools and dtrace tools.
# r296685 fix cross-endian objcopy
# r310724 fixed PR 215350, a crash in libdwarf with objects built by GCC 6.2.
# r334881 added libdwarf constants used by ctfconvert.
# r338478 fixed a crash in objcopy for mips64el objects
# r339083 libelf: correct mips64el test to use ELF header
# r348347 Add missing powerpc64 relocation support to libdwarf
.if ${BOOTSTRAPPING} < 1300030
_elftoolchain_libs= lib/libelf lib/libdwarf lib/libzstd
${_bt}-lib/libelf: ${_bt_m4_depend}
${_bt}-lib/libdwarf: ${_bt_m4_depend}
.endif
# flua is required to regenerate syscall files. It first appeared during the
# 13.0-CURRENT cycle, and thus needs to be built on older releases and stable
# branches.
.if ${BOOTSTRAPPING} < 1300059
${_bt}-libexec/flua: ${_bt}-lib/liblua ${_bt}-lib/libucl
_flua= lib/liblua lib/libucl libexec/flua
.endif
# r245440 mtree -N support added
# r313404 requires sha384.h for libnetbsd, added to libmd in r292782
.if ${BOOTSTRAPPING} < 1100093
_libnetbsd= lib/libnetbsd
_nmtree= lib/libmd \
usr.sbin/nmtree
${_bt}-lib/libnetbsd: ${_bt}-lib/libmd
${_bt}-usr.sbin/nmtree: ${_bt}-lib/libnetbsd
.else
_bootstrap_tools_links+=mtree
.endif
# r246097: log addition of login.conf.db, passwd, pwd.db, and spwd.db with cat -l
.if ${BOOTSTRAPPING} < 1000027
_cat= bin/cat
.else
_bootstrap_tools_links+=cat
.endif
# r277259 crunchide: Correct 64-bit section header offset
# r281674 crunchide: always include both 32- and 64-bit ELF support
.if ${BOOTSTRAPPING} < 1100078
_crunchide= usr.sbin/crunch/crunchide
.else
_bootstrap_tools_links+=crunchide
.endif
# 1400052, 1300526, 1203507: Removed -dc from linker invocation
.if ${BOOTSTRAPPING} < 1203507 || \
(${BOOTSTRAPPING} > 1300000 && ${BOOTSTRAPPING} < 1300526) || \
(${BOOTSTRAPPING} > 1400000 && ${BOOTSTRAPPING} < 1400052)
_crunchgen= usr.sbin/crunch/crunchgen
.else
_bootstrap_tools_links+=crunchgen
.endif
# The ATKBD_DFLT_KEYMAP, UKBD_DFLT_KEYMAP and KBDMUX_DFLT_KEYMAP kernel options
# require kbdcontrol. Note that, even on FreeBSD, the host will lack kbdcontrol
# if built with WITHOUT_LEGACY_CONSOLE.
.if defined(BOOTSTRAP_ALL_TOOLS) || !exists(/usr/sbin/kbdcontrol)
_kbdcontrol= usr.sbin/kbdcontrol
.else
_bootstrap_tools_links+=kbdcontrol
.endif
.if ${MK_DISK_IMAGE_TOOLS_BOOTSTRAP} != "no"
_etdump= usr.bin/etdump
_makefs= usr.sbin/makefs
_libnetbsd= lib/libnetbsd
${_bt}-usr.sbin/makefs: ${_bt}-lib/libnetbsd
.if defined(BOOTSTRAP_ALL_TOOLS)
_libsbuf= lib/libsbuf
${_bt}-usr.sbin/makefs: ${_bt}-lib/libsbuf
.endif
.endif
# 1300102: VHDX support
.if ${BOOTSTRAPPING} < 1201520 || \
(${BOOTSTRAPPING} > 1300000 && ${BOOTSTRAPPING} < 1300102) || \
${MK_DISK_IMAGE_TOOLS_BOOTSTRAP} != "no"
_mkimg= usr.bin/mkimg
.else
_bootstrap_tools_links+=mkimg
.endif
_yacc= usr.bin/yacc
.if ${MK_BSNMP} != "no"
_gensnmptree= usr.sbin/bsnmpd/gensnmptree
.endif
# We need to build tblgen when we're building clang or lld, either as
# bootstrap tools, or as part of the normal build.
# llvm-tblgen is also needed for various llvm binutils (e.g. objcopy).
.if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_CLANG} != "no" || \
${MK_LLD_BOOTSTRAP} != "no" || ${MK_LLD} != "no" || \
${MK_LLDB} != "no" || ${MK_LLVM_BINUTILS} != "no"
_clang_tblgen= \
lib/clang/libllvmminimal \
usr.bin/clang/llvm-tblgen
.if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_CLANG} != "no" || \
${MK_LLDB} != "no"
_clang_tblgen+= lib/clang/libclangminimal
_clang_tblgen+= usr.bin/clang/clang-tblgen
.endif
.if ${MK_LLDB} != "no"
_clang_tblgen+= usr.bin/clang/lldb-tblgen
.endif
${_bt}-usr.bin/clang/clang-tblgen: ${_bt}-lib/clang/libllvmminimal
${_bt}-usr.bin/clang/llvm-tblgen: ${_bt}-lib/clang/libllvmminimal
${_bt}-usr.bin/clang/lldb-tblgen: ${_bt}-lib/clang/libllvmminimal
.endif
# C.UTF-8 is always built in share/ctypes and we need localedef for that.
_localedef= usr.bin/localedef
${_bt}-usr.bin/localedef: ${_bt}-usr.bin/yacc ${_bt_lex_depend}
.if ${MK_ICONV} != "no"
_mkesdb= usr.bin/mkesdb
_mkcsmapper= usr.bin/mkcsmapper
${_bt}-usr.bin/mkesdb: ${_bt}-usr.bin/yacc ${_bt_lex_depend}
${_bt}-usr.bin/mkcsmapper: ${_bt}-usr.bin/yacc ${_bt_lex_depend}
.endif
.if ${MK_KERBEROS} != "no"
_kerberos5_bootstrap_tools= \
kerberos5/tools/make-roken \
kerberos5/lib/libroken \
kerberos5/lib/libvers \
kerberos5/tools/asn1_compile \
kerberos5/tools/slc \
usr.bin/compile_et
.ORDER: ${_kerberos5_bootstrap_tools:C/^/${_bt}-/g}
.for _tool in ${_kerberos5_bootstrap_tools}
${_bt}-${_tool}: ${_bt}-usr.bin/yacc ${_bt_lex_depend}
.endfor
.endif
${_bt}-usr.bin/mandoc: ${_bt}-lib/libopenbsd
# The tools listed in _basic_bootstrap_tools will generally not be
# bootstrapped unless BOOTSTRAP_ALL_TOOLS is set. However, when building on a
# Linux or macOS host the host versions are incompatible so we need to build
# them from the source tree. Usually the link name will be the same as the subdir,
# but some directories such as grep or test install multiple binaries. In that
# case we use the _basic_bootstrap_tools_multilink variable which is a list of
# subdirectory and comma-separated list of files.
_basic_bootstrap_tools_multilink=usr.bin/grep grep,egrep,fgrep
_basic_bootstrap_tools_multilink+=bin/test test,[
# bootstrap tools needed by buildworld:
_basic_bootstrap_tools+=usr.bin/cut bin/expr usr.bin/gencat usr.bin/join \
usr.bin/mktemp bin/realpath bin/rmdir usr.bin/sed usr.bin/sort \
usr.bin/truncate usr.bin/tsort
# Some build scripts use nawk instead of awk (this happens at least in
# cddl/contrib/opensolaris/lib/libdtrace/common/mknames.sh) so we need both awk
# and nawk in ${WORLDTMP}/legacy/bin.
_basic_bootstrap_tools_multilink+=usr.bin/awk awk,nawk
# file2c is required for building usr.sbin/config:
_basic_bootstrap_tools+=usr.bin/file2c
# uuencode/uudecode required for share/tabset
_basic_bootstrap_tools_multilink+=usr.bin/bintrans uuencode,uudecode
# xargs is required by mkioctls
_basic_bootstrap_tools+=usr.bin/xargs
# cap_mkdb is required for share/termcap:
_basic_bootstrap_tools+=usr.bin/cap_mkdb
# services_mkdb/pwd_mkdb are required for installworld:
_basic_bootstrap_tools+=usr.sbin/services_mkdb usr.sbin/pwd_mkdb
# ldd is required for installcheck (TODO: just always use /usr/bin/ldd instead?)
.if !defined(CROSSBUILD_HOST)
# ldd is only needed for updating the running system so we don't need to
# bootstrap ldd on non-FreeBSD systems
_basic_bootstrap_tools+=usr.bin/ldd
.endif
# sysctl/chflags are required for installkernel:
.if !defined(CROSSBUILD_HOST)
_basic_bootstrap_tools+=bin/chflags
# Note: sysctl does not bootstrap on FreeBSD < 13 anymore, but that doesn't
# matter since we don't need any of the new features for the build.
_bootstrap_tools_links+=sysctl
.else
# When building on non-FreeBSD, install a fake chflags instead since the
# version from the source tree cannot work. We also don't need sysctl since we
# are installing with -DNO_ROOT.
_other_bootstrap_tools+=tools/build/cross-build/fake_chflags
.endif
# mkfifo is used by sys/conf/newvers.sh
_basic_bootstrap_tools+=usr.bin/mkfifo
# jot is needed for the mkimg tests
_basic_bootstrap_tools+=usr.bin/jot
.if ${MK_BOOT} != "no"
# md5 is used by boot/beri (and possibly others)
_basic_bootstrap_tools+=sbin/md5
.endif
# tzsetup is needed as an install tool
.if ${MK_ZONEINFO} != "no"
_basic_bootstrap_tools+=usr.sbin/tzsetup
.endif
.if defined(BOOTSTRAP_ALL_TOOLS)
_other_bootstrap_tools+=${_basic_bootstrap_tools}
.for _subdir _links in ${_basic_bootstrap_tools_multilink}
_other_bootstrap_tools+=${_subdir}
.endfor
${_bt}-usr.bin/awk: ${_bt_lex_depend} ${_bt}-usr.bin/yacc
${_bt}-bin/expr: ${_bt_lex_depend} ${_bt}-usr.bin/yacc
# If we are bootstrapping file2c, we have to build it before config:
${_bt}-usr.sbin/config: ${_bt}-usr.bin/file2c ${_bt_lex_depend}
# Note: no symlink to make/bmake in the !BOOTSTRAP_ALL_TOOLS case here since
# the links to make/bmake will already have been created in the
# `make legacy` step. Not adding a link to make is important on non-FreeBSD
# since "make" will usually point to GNU make there.
_other_bootstrap_tools+=usr.bin/bmake
# Avoid dependency on host bz2 headers:
_other_bootstrap_tools+=lib/libbz2
${_bt}-usr.bin/grep: ${_bt}-lib/libbz2
# libdwarf depends on libz
_other_bootstrap_tools+=lib/libz
${_bt}-lib/libdwarf: ${_bt}-lib/libz
# libroken depends on libcrypt
_other_bootstrap_tools+=lib/libcrypt
${_bt}-lib/libroken: ${_bt}-lib/libcrypt
.else
# All tools in _basic_bootstrap_tools have the same name as the subdirectory
# so we can use :T to get the name of the symlinks that we need to create.
_bootstrap_tools_links+=${_basic_bootstrap_tools:T}
.for _subdir _links in ${_basic_bootstrap_tools_multilink}
_bootstrap_tools_links+=${_links:S/,/ /g}
.endfor
.endif # defined(BOOTSTRAP_ALL_TOOLS)
# Link the tools that we need for building, but don't need to bootstrap
# because the host version is known to be compatible, into ${WORLDTMP}/legacy.
# We do this before building any of the bootstrap tools in case they depend on
# the presence of any of the links (e.g. m4/lex/awk).
${_bt}-links: .PHONY
.for _tool in ${_bootstrap_tools_links}
${_bt}-link-${_tool}: .PHONY
@rm -f "${WORLDTMP}/legacy/bin/${_tool}"; \
source_path=`which ${_tool}`; \
if [ ! -e "$${source_path}" ] ; then \
echo "Cannot find host tool '${_tool}'"; false; \
fi; \
cp -pf "$${source_path}" "${WORLDTMP}/legacy/bin/${_tool}"
${_bt}-links: ${_bt}-link-${_tool}
.endfor
bootstrap-tools: ${_bt}-links .PHONY
# Please document (add comment) why something is in 'bootstrap-tools'.
# Try to limit the building of each bootstrap tool to just the
# FreeBSD versions that need the tool built at this stage of the build.
.for _tool in \
${_clang_tblgen} \
${_kerberos5_bootstrap_tools} \
${_strfile} \
usr.bin/dtc \
${_cat} \
${_kbdcontrol} \
${_elftoolchain_libs} \
lib/libopenbsd \
usr.bin/mandoc \
usr.bin/rpcgen \
${_yacc} \
${_m4} \
${_lex} \
${_other_bootstrap_tools} \
usr.bin/xinstall \
${_gensnmptree} \
usr.sbin/config \
${_flua} \
${_crunchide} \
${_crunchgen} \
${_etdump} \
${_libnetbsd} \
${_libsbuf} \
${_makefs} \
${_mkimg} \
${_nmtree} \
${_vtfontcvt} \
${_localedef} \
${_mkcsmapper} \
${_mkesdb} \
${_zic} \
${LOCAL_BSTOOL_DIRS}
${_bt}-${_tool}: ${_bt}-links .PHONY .MAKE
${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \
cd ${.CURDIR}/${_tool}; \
if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \
if [ "${_tool}" = "usr.bin/lex" ]; then \
${MAKE} DIRPRFX=${_tool}/ bootstrap; \
fi; \
${MAKE} DIRPRFX=${_tool}/ all; \
${MAKE} DIRPRFX=${_tool}/ DESTDIR=${WORLDTMP}/legacy install
bootstrap-tools: ${_bt}-${_tool}
.endfor
.if target(${_bt}-lib/libmd)
# If we are bootstrapping libmd (e.g. when building on macOS/Linux) add the
# necessary dependencies:
${_bt}-usr.bin/sort: ${_bt}-lib/libmd
${_bt}-usr.bin/xinstall: ${_bt}-lib/libmd
${_bt}-sbin/md5: ${_bt}-lib/libmd
.endif
#
# build-tools: Build special purpose build tools
#
.if !defined(NO_SHARE) && ${MK_SYSCONS} != "no"
_share= share/syscons/scrnmaps
.endif
.if ${MK_RESCUE} != "no"
# rescue includes programs that have build-tools targets
_rescue=rescue/rescue
.endif
.if ${MK_TCSH} != "no"
_tcsh=bin/csh
.endif
.if ${MK_FILE} != "no"
_libmagic=lib/libmagic
.endif
.if ${MK_PMC} != "no"
_jevents=lib/libpmc/pmu-events
.endif
# kernel-toolchain skips _cleanobj, so handle cleaning up previous
# build-tools directories if needed.
.if ${MK_CLEAN} == "yes" && make(kernel-toolchain)
_bt_clean= ${CLEANDIR}
.endif
.for _tool in \
${_tcsh} \
bin/sh \
${LOCAL_TOOL_DIRS} \
${_jevents} \
lib/ncurses/tinfo \
${_rescue} \
${_share} \
usr.bin/awk \
${_libmagic} \
usr.bin/vi/catalog
build-tools_${_tool}: .PHONY
${_+_}@${ECHODIR} "===> ${_tool} (${_bt_clean:D${_bt_clean},}obj,build-tools)"; \
cd ${.CURDIR}/${_tool}; \
if [ -n "${_bt_clean}" ]; then ${MAKE} DIRPRFX=${_tool}/ ${_bt_clean}; fi; \
if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \
${MAKE} DIRPRFX=${_tool}/ build-tools
build-tools: build-tools_${_tool}
.endfor
#
# kernel-tools: Build kernel-building tools
#
kernel-tools: .PHONY
mkdir -p ${WORLDTMP}/usr
${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \
-p ${WORLDTMP}/usr >/dev/null
#
# cross-tools: All the tools needed to build the rest of the system after
# we get done with the earlier stages. It is the last set of tools needed
# to begin building the target binaries.
#
.if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${BUILD_WITH_STRICT_TMPPATH} != 0
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
_btxld= usr.sbin/btxld
.endif
.endif
# Rebuild ctfconvert and ctfmerge to avoid difficult-to-diagnose failures
# resulting from missing bug fixes or ELF Toolchain updates.
.if ${MK_CDDL} != "no"
_dtrace_tools= cddl/lib/libctf cddl/lib/libspl cddl/usr.bin/ctfconvert \
cddl/usr.bin/ctfmerge
.endif
# If we're given an XAS, don't build binutils.
.if ${XAS:M/*} == ""
.if ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
_elftctools= lib/libelftc \
lib/libpe \
usr.bin/elfctl \
usr.bin/elfdump \
usr.bin/objcopy \
usr.bin/nm \
usr.bin/size \
usr.bin/strings
# These are not required by the build, but can be useful for developers who
# cross-build on a FreeBSD 10 host:
_elftctools+= usr.bin/addr2line
.endif
.elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
# If cross-building with an external binutils we still need to build strip for
# the target (for at least crunchide).
_elftctools= lib/libelftc \
lib/libpe \
usr.bin/elfctl \
usr.bin/elfdump \
usr.bin/objcopy
.endif
.if ${MK_CLANG_BOOTSTRAP} != "no"
_clang= usr.bin/clang
.endif
.if ${MK_LLD_BOOTSTRAP} != "no"
_lld= usr.bin/clang/lld
.endif
.if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_LLD_BOOTSTRAP} != "no"
_clang_libs= lib/clang
.endif
.if ${MK_USB} != "no"
_usb_tools= stand/usb/tools
.endif
.if ${BUILD_WITH_STRICT_TMPPATH} != 0 || defined(BOOTSTRAP_ALL_TOOLS)
_ar=usr.bin/ar
.endif
cross-tools: .MAKE .PHONY
.for _tool in \
${LOCAL_XTOOL_DIRS} \
${_ar} \
${_clang_libs} \
${_clang} \
${_lld} \
${_elftctools} \
${_dtrace_tools} \
${_btxld} \
${_usb_tools}
${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \
cd ${.CURDIR}/${_tool}; \
if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \
${MAKE} DIRPRFX=${_tool}/ all; \
${MAKE} DIRPRFX=${_tool}/ DESTDIR=${WORLDTMP} install
.endfor
#
# native-xtools is the current target for qemu-user cross builds of ports
# via poudriere and the imgact_binmisc kernel module.
# This target merely builds a toolchain/sysroot, then builds the tools it wants
# with the options it wants in a special MAKEOBJDIRPREFIX, using the toolchain
# already built. It then installs the static tools to NXBDESTDIR for Poudriere
# to pick up.
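# For example (illustrative values; NXB_TARGET/NXB_TARGET_ARCH are required):
#   make native-xtools NXB_TARGET=arm64 NXB_TARGET_ARCH=aarch64
#   make native-xtools-install NXB_TARGET=arm64 NXB_TARGET_ARCH=aarch64 \
#       DESTDIR=/path/to/jail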
#
NXBOBJROOT= ${OBJROOT}${MACHINE}.${MACHINE_ARCH}/nxb/
NXBOBJTOP= ${NXBOBJROOT}${NXB_TARGET}.${NXB_TARGET_ARCH}
NXTP?= /nxb-bin
.if ${NXTP:N/*}
.error NXTP variable should be an absolute path
.endif
NXBDESTDIR?= ${DESTDIR}${NXTP}
# This is the list of tools to be built/installed as static binaries and,
# where appropriate, built for the given TARGET.TARGET_ARCH.
NXBDIRS+= \
bin/cat \
bin/chmod \
bin/cp \
${_tcsh} \
bin/echo \
bin/expr \
bin/hostname \
bin/ln \
bin/ls \
bin/mkdir \
bin/mv \
bin/ps \
bin/realpath \
bin/rm \
bin/rmdir \
bin/sh \
bin/sleep \
sbin/md5 \
sbin/sysctl \
usr.bin/addr2line \
usr.bin/ar \
usr.bin/awk \
usr.bin/basename \
usr.bin/bmake \
usr.bin/bzip2 \
usr.bin/cmp \
usr.bin/diff \
usr.bin/dirname \
usr.bin/objcopy \
usr.bin/env \
usr.bin/fetch \
usr.bin/find \
usr.bin/grep \
usr.bin/gzip \
usr.bin/head \
usr.bin/id \
usr.bin/lex \
usr.bin/limits \
usr.bin/mandoc \
usr.bin/mktemp \
usr.bin/mt \
usr.bin/nm \
usr.bin/patch \
usr.bin/readelf \
usr.bin/sed \
usr.bin/size \
usr.bin/sort \
usr.bin/strings \
usr.bin/tar \
usr.bin/touch \
usr.bin/tr \
usr.bin/true \
usr.bin/uniq \
usr.bin/unzip \
usr.bin/wc \
usr.bin/xargs \
usr.bin/xinstall \
usr.bin/xz \
usr.bin/yacc \
usr.sbin/chown
SUBDIR_DEPEND_usr.bin/clang= lib/clang
.if ${MK_CLANG} != "no"
NXBDIRS+= lib/clang
NXBDIRS+= usr.bin/clang
.endif
# XXX: native-xtools passes along ${NXBDIRS} in SUBDIR_OVERRIDE, which needs
# to be evaluated after NXBDIRS is set.
.if make(install) && !empty(SUBDIR_OVERRIDE)
SUBDIR= ${SUBDIR_OVERRIDE}
.endif
NXBMAKEARGS+= \
OBJTOP=${NXBOBJTOP:Q} \
OBJROOT=${NXBOBJROOT:Q} \
-DNO_SHARED \
-DNO_CPU_CFLAGS \
-DNO_PIC \
MK_CASPER=no \
MK_CLANG_EXTRAS=no \
MK_CLANG_FORMAT=no \
MK_CLANG_FULL=no \
MK_CTF=no \
MK_DEBUG_FILES=no \
MK_HTML=no \
MK_LLDB=no \
MK_MAN=no \
MK_MAN_UTILS=yes \
MK_OFED=no \
MK_OPENSSH=no \
MK_PROFILE=no \
MK_RETPOLINE=no \
MK_SENDMAIL=no \
MK_SSP=no \
MK_TESTS=no \
MK_WERROR=no \
MK_ZFS=no
NXBMAKEENV+= \
MAKEOBJDIRPREFIX=
# This should match all of the knobs in lib/Makefile that are needed to avoid
# descending into lib/clang!
NXBTNOTOOLS= MK_CLANG=no MK_LLD=no MK_LLDB=no MK_LLVM_BINUTILS=no
.if make(native-xtools*) && \
(!defined(NXB_TARGET) || !defined(NXB_TARGET_ARCH))
.error Missing NXB_TARGET / NXB_TARGET_ARCH
.endif
# For 'toolchain' we want to produce native binaries that themselves generate
# native binaries.
NXBTMAKE= ${NXBMAKEENV} ${MAKE} ${NXBMAKEARGS:N-DNO_PIC:N-DNO_SHARED} \
TARGET=${MACHINE} TARGET_ARCH=${MACHINE_ARCH}
# For 'everything' we want to produce native binaries (hence -target set to
# MACHINE) that themselves generate TARGET.TARGET_ARCH binaries.
# TARGET/TARGET_ARCH are still passed along from the user.
#
# Use the toolchain we create as an external toolchain.
.if ${USING_SYSTEM_COMPILER} == "yes" || ${XCC:N${CCACHE_BIN}:M/*}
NXBMAKE+= XCC="${XCC}" \
XCXX="${XCXX}" \
XCPP="${XCPP}"
.else
NXBMAKE+= XCC="${NXBOBJTOP}/tmp/usr/bin/cc" \
XCXX="${NXBOBJTOP}/tmp/usr/bin/c++" \
XCPP="${NXBOBJTOP}/tmp/usr/bin/cpp"
.endif
NXBMAKE+= ${NXBMAKEENV} ${MAKE} -f Makefile.inc1 ${NXBMAKEARGS} \
MACHINE=${MACHINE} MACHINE_ARCH=${MACHINE_ARCH} \
TARGET=${NXB_TARGET} TARGET_ARCH=${NXB_TARGET_ARCH} \
TARGET_TRIPLE=${MACHINE_TRIPLE:Q}
# NXBDIRS is improperly based on MACHINE rather than NXB_TARGET. Need to
# invoke a sub-make to reevaluate MK_CLANG, etc, for NXBDIRS.
NXBMAKE+= SUBDIR_OVERRIDE='$${NXBDIRS:M*}'
# Need to avoid the -isystem logic when using clang as an external toolchain
# even if the TARGET being built for wants GCC.
NXBMAKE+= WANT_COMPILER_TYPE='$${X_COMPILER_TYPE}'
native-xtools: .PHONY
${_+_}cd ${.CURDIR}; ${NXBTMAKE} _cleanobj
# Build the bootstrap/host/cross tools that produce native binaries
${_+_}cd ${.CURDIR}; ${NXBTMAKE} kernel-toolchain
# Populate the includes/libraries sysroot used to produce native binaries.
# This is split out from 'toolchain' above mostly so that target LLVM
# libraries have a proper LLVM_DEFAULT_TARGET_TRIPLE without
# polluting the cross-compiler build. The LLVM libs are skipped
# here to avoid the problem but are kept in 'toolchain' so that
# needed build tools are built.
${_+_}cd ${.CURDIR}; ${NXBTMAKE} _includes ${NXBTNOTOOLS}
${_+_}cd ${.CURDIR}; ${NXBTMAKE} _libraries ${NXBTNOTOOLS}
.if !defined(NO_OBJWALK)
${_+_}cd ${.CURDIR}; ${NXBMAKE} _obj
.endif
${_+_}cd ${.CURDIR}; ${NXBMAKE} everything
@echo ">> native-xtools done. Use 'make native-xtools-install' to install to a given DESTDIR"
native-xtools-install: .PHONY
mkdir -p ${NXBDESTDIR}/bin ${NXBDESTDIR}/sbin ${NXBDESTDIR}/usr
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \
-p ${NXBDESTDIR}/usr >/dev/null
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \
-p ${NXBDESTDIR}/usr/include >/dev/null
.for d in ${LIBCOMPAT_INCLUDE_DIRS}
mkdir -p ${NXBDESTDIR}/usr/include/${d}
.endfor
${_+_}cd ${.CURDIR}; ${NXBMAKE} \
DESTDIR=${NXBDESTDIR} \
-DNO_ROOT \
install
#
# hierarchy - ensure that all the needed directories are present
#
hierarchy hier: .MAKE .PHONY
${_+_}cd ${.CURDIR}/etc; ${HMAKE} distrib-dirs
#
# libraries - build all libraries, and install them under ${DESTDIR}.
#
# The list of libraries with dependents (${_prebuild_libs}) and their
# interdependencies (__L) is generated automatically by the
# ${.CURDIR}/tools/make_libdeps.sh script.
#
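# A generated dependency reads, e.g. (illustrative library names):
#   lib/libfoo__L: lib/libbar__L
# meaning lib/libfoo must be built and installed after lib/libbar.
#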
libraries: .MAKE .PHONY
${_+_}cd ${.CURDIR}; \
${MAKE} -f Makefile.inc1 _prereq_libs; \
${MAKE} -f Makefile.inc1 _startup_libs; \
${MAKE} -f Makefile.inc1 _prebuild_libs; \
${MAKE} -f Makefile.inc1 _generic_libs
#
# static libgcc.a prerequisite for shared libc
#
_prereq_libs= lib/libcompiler_rt
.if ${MK_SSP} != "no"
_prereq_libs+= lib/libssp_nonshared
.endif
.if ${MK_ASAN} != "no"
_prereq_libs+= lib/libclang_rt/asan
_prereq_libs+= lib/libclang_rt/asan-preinit
_prereq_libs+= lib/libclang_rt/asan_cxx
.endif
.if ${MK_UBSAN} != "no"
_prereq_libs+= lib/libclang_rt/ubsan_minimal
_prereq_libs+= lib/libclang_rt/ubsan_standalone
_prereq_libs+= lib/libclang_rt/ubsan_standalone_cxx
.endif
# These dependencies are not automatically generated:
#
# lib/csu and lib/libc must be built before
# all shared libraries for ELF.
#
_startup_libs= lib/csu
_startup_libs+= lib/libc
_startup_libs+= lib/libc_nonshared
_startup_libs+= lib/libcxxrt
_prereq_libs+= lib/libgcc_eh lib/libgcc_s
_startup_libs+= lib/libgcc_eh lib/libgcc_s
lib/libgcc_s__L: lib/libc__L
lib/libgcc_s__L: lib/libc_nonshared__L
lib/libcxxrt__L: lib/libgcc_s__L
_prebuild_libs= ${_kerberos5_lib_libasn1} \
${_kerberos5_lib_libhdb} \
${_kerberos5_lib_libheimbase} \
${_kerberos5_lib_libheimntlm} \
${_libsqlite3} \
${_kerberos5_lib_libheimipcc} \
${_kerberos5_lib_libhx509} ${_kerberos5_lib_libkrb5} \
${_kerberos5_lib_libroken} \
${_kerberos5_lib_libwind} \
lib/libbz2 ${_libcom_err} lib/libcrypt \
lib/libc++ \
lib/libelf lib/libexpat \
lib/libfigpar \
${_lib_libgssapi} \
lib/libjail \
lib/libkiconv lib/libkvm lib/liblzma lib/libmd lib/libnv \
lib/libzstd \
${_lib_casper} \
lib/ncurses/tinfo \
lib/ncurses/ncurses \
lib/ncurses/form \
lib/libpam/libpam lib/libthr \
${_lib_libradius} lib/libsbuf lib/libtacplus \
lib/libgeom \
+ ${_lib_librt} \
${_cddl_lib_libumem} ${_cddl_lib_libnvpair} \
${_cddl_lib_libuutil} \
${_cddl_lib_libavl} \
${_cddl_lib_libicp} \
${_cddl_lib_libicp_rescue} \
${_cddl_lib_libspl} \
${_cddl_lib_libtpool} \
${_cddl_lib_libzfs_core} ${_cddl_lib_libzfs} \
${_cddl_lib_libzutil} \
${_cddl_lib_libctf} ${_cddl_lib_libzfsbootenv} \
lib/libufs \
lib/libutil lib/libpjdlog ${_lib_libypclnt} lib/libz lib/msun \
${_secure_lib_libcrypto} ${_secure_lib_libssl} \
${_lib_libldns} ${_secure_lib_libssh}
.if ${MK_DIALOG} != "no"
_prebuild_libs+= gnu/lib/libdialog
gnu/lib/libdialog__L: lib/msun__L lib/ncurses/tinfo__L lib/ncurses/ncurses__L
.endif
.if ${MK_GOOGLETEST} != "no"
_prebuild_libs+= lib/libregex
.endif
lib/libgeom__L: lib/libexpat__L lib/libsbuf__L
lib/libkvm__L: lib/libelf__L
.if ${MK_RADIUS_SUPPORT} != "no"
_lib_libradius= lib/libradius
.endif
lib/ncurses/ncurses__L: lib/ncurses/tinfo__L
lib/ncurses/form__L: lib/ncurses/ncurses__L
.if ${MK_OFED} != "no"
_prebuild_libs+= \
lib/ofed/libibverbs \
lib/ofed/libibmad \
lib/ofed/libibumad \
lib/ofed/complib \
lib/ofed/libmlx5
lib/ofed/libibmad__L: lib/ofed/libibumad__L
lib/ofed/complib__L: lib/libthr__L
lib/ofed/libmlx5__L: lib/ofed/libibverbs__L lib/libthr__L
.endif
.if ${MK_CASPER} != "no"
_lib_casper= lib/libcasper
.endif
lib/libpjdlog__L: lib/libutil__L
lib/libcasper__L: lib/libnv__L
lib/liblzma__L: lib/libmd__L lib/libthr__L
lib/libzstd__L: lib/libthr__L
+lib/librt__L: lib/libthr__L
_generic_libs= ${_cddl_lib} gnu/lib ${_kerberos5_lib} lib ${_secure_lib}
.if ${MK_IPFILTER} != "no"
_generic_libs+= sbin/ipf/libipf
.endif
.for _DIR in ${LOCAL_LIB_DIRS}
.if ${_DIR} == ".WAIT" || (empty(_generic_libs:M${_DIR}) && exists(${.CURDIR}/${_DIR}/Makefile))
_generic_libs+= ${_DIR}
.endif
.endfor
lib/libtacplus__L: lib/libmd__L lib/libpam/libpam__L
.if ${MK_CDDL} != "no"
_cddl_lib_libumem= cddl/lib/libumem
_cddl_lib_libnvpair= cddl/lib/libnvpair
_cddl_lib_libavl= cddl/lib/libavl
_cddl_lib_libuutil= cddl/lib/libuutil
_cddl_lib_libspl= cddl/lib/libspl
cddl/lib/libavl__L: cddl/lib/libspl__L
cddl/lib/libnvpair__L: cddl/lib/libspl__L
cddl/lib/libuutil__L: cddl/lib/libavl__L cddl/lib/libspl__L
.if ${MK_ZFS} != "no"
+_lib_librt= lib/librt
_cddl_lib_libicp= cddl/lib/libicp
_cddl_lib_libicp_rescue= cddl/lib/libicp_rescue
_cddl_lib_libtpool= cddl/lib/libtpool
_cddl_lib_libzutil= cddl/lib/libzutil
_cddl_lib_libzfs_core= cddl/lib/libzfs_core
_cddl_lib_libzfs= cddl/lib/libzfs
_cddl_lib_libzfsbootenv= cddl/lib/libzfsbootenv
cddl/lib/libtpool__L: cddl/lib/libspl__L
cddl/lib/libzutil__L: cddl/lib/libavl__L lib/libgeom__L lib/msun__L cddl/lib/libtpool__L
cddl/lib/libzfs_core__L: cddl/lib/libnvpair__L cddl/lib/libspl__L cddl/lib/libzutil__L
-cddl/lib/libzfs__L: cddl/lib/libzfs_core__L lib/msun__L lib/libutil__L
+cddl/lib/libzfs__L: cddl/lib/libzfs_core__L lib/msun__L lib/libutil__L lib/librt__L
cddl/lib/libzfs__L: lib/libthr__L lib/libmd__L lib/libz__L cddl/lib/libumem__L
cddl/lib/libzfs__L: cddl/lib/libuutil__L cddl/lib/libavl__L lib/libgeom__L
cddl/lib/libzfs__L: cddl/lib/libnvpair__L cddl/lib/libzutil__L
cddl/lib/libzfs__L: secure/lib/libcrypto__L
cddl/lib/libzfsbootenv__L: cddl/lib/libzfs__L
lib/libbe__L: cddl/lib/libzfs__L cddl/lib/libzfsbootenv__L
.endif
_cddl_lib_libctf= cddl/lib/libctf
_cddl_lib= cddl/lib
cddl/lib/libctf__L: lib/libz__L cddl/lib/libspl__L
.endif
# cddl/lib/libdtrace requires lib/libproc and lib/librtld_db
_prebuild_libs+= lib/libprocstat lib/libproc lib/librtld_db
lib/libprocstat__L: lib/libelf__L lib/libkvm__L lib/libutil__L
lib/libproc__L: lib/libprocstat__L
lib/librtld_db__L: lib/libprocstat__L
.if ${MK_CRYPT} != "no"
.if ${MK_OPENSSL} != "no"
_secure_lib_libcrypto= secure/lib/libcrypto
_secure_lib_libssl= secure/lib/libssl
lib/libradius__L secure/lib/libssl__L: secure/lib/libcrypto__L
secure/lib/libcrypto__L: lib/libthr__L
.if ${MK_LDNS} != "no"
_lib_libldns= lib/libldns
lib/libldns__L: secure/lib/libssl__L
.endif
.if ${MK_OPENSSH} != "no"
_secure_lib_libssh= secure/lib/libssh
secure/lib/libssh__L: lib/libz__L secure/lib/libcrypto__L lib/libcrypt__L
.if ${MK_LDNS} != "no"
secure/lib/libssh__L: lib/libldns__L
.endif
.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
secure/lib/libssh__L: lib/libgssapi__L kerberos5/lib/libkrb5__L \
kerberos5/lib/libhx509__L kerberos5/lib/libasn1__L lib/libcom_err__L \
lib/libmd__L kerberos5/lib/libroken__L
.endif
.endif
.endif
_secure_lib= secure/lib
.endif
.if ${MK_KERBEROS} != "no"
kerberos5/lib/libasn1__L: lib/libcom_err__L kerberos5/lib/libroken__L
kerberos5/lib/libhdb__L: kerberos5/lib/libasn1__L lib/libcom_err__L \
kerberos5/lib/libkrb5__L kerberos5/lib/libroken__L \
kerberos5/lib/libwind__L lib/libsqlite3__L
kerberos5/lib/libheimntlm__L: secure/lib/libcrypto__L kerberos5/lib/libkrb5__L \
kerberos5/lib/libroken__L lib/libcom_err__L
kerberos5/lib/libhx509__L: kerberos5/lib/libasn1__L lib/libcom_err__L \
secure/lib/libcrypto__L kerberos5/lib/libroken__L kerberos5/lib/libwind__L
kerberos5/lib/libkrb5__L: kerberos5/lib/libasn1__L lib/libcom_err__L \
lib/libcrypt__L secure/lib/libcrypto__L kerberos5/lib/libhx509__L \
kerberos5/lib/libroken__L kerberos5/lib/libwind__L \
kerberos5/lib/libheimbase__L kerberos5/lib/libheimipcc__L
kerberos5/lib/libroken__L: lib/libcrypt__L
kerberos5/lib/libwind__L: kerberos5/lib/libroken__L lib/libcom_err__L
kerberos5/lib/libheimbase__L: lib/libthr__L
kerberos5/lib/libheimipcc__L: kerberos5/lib/libroken__L kerberos5/lib/libheimbase__L lib/libthr__L
.endif
lib/libsqlite3__L: lib/libthr__L
.if ${MK_GSSAPI} != "no"
_lib_libgssapi= lib/libgssapi
.endif
.if ${MK_KERBEROS} != "no"
_kerberos5_lib= kerberos5/lib
_kerberos5_lib_libasn1= kerberos5/lib/libasn1
_kerberos5_lib_libhdb= kerberos5/lib/libhdb
_kerberos5_lib_libheimbase= kerberos5/lib/libheimbase
_kerberos5_lib_libkrb5= kerberos5/lib/libkrb5
_kerberos5_lib_libhx509= kerberos5/lib/libhx509
_kerberos5_lib_libroken= kerberos5/lib/libroken
_kerberos5_lib_libheimntlm= kerberos5/lib/libheimntlm
_libsqlite3= lib/libsqlite3
_kerberos5_lib_libheimipcc= kerberos5/lib/libheimipcc
_kerberos5_lib_libwind= kerberos5/lib/libwind
_libcom_err= lib/libcom_err
.endif
.if ${MK_NIS} != "no"
_lib_libypclnt= lib/libypclnt
.endif
.if ${MK_OPENSSL} == "no"
lib/libradius__L: lib/libmd__L
.endif
lib/libproc__L: \
${_cddl_lib_libctf:D${_cddl_lib_libctf}__L} \
lib/libelf__L lib/librtld_db__L lib/libutil__L lib/libcxxrt__L
.for _lib in ${_prereq_libs}
${_lib}__PL: .PHONY .MAKE
.if !defined(_MKSHOWCONFIG) && exists(${.CURDIR}/${_lib})
${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \
cd ${.CURDIR}/${_lib}; \
if [ -z "${NO_OBJWALK}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \
${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \
DIRPRFX=${_lib}/ all; \
${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \
DIRPRFX=${_lib}/ install
.endif
.endfor
.for _lib in ${_startup_libs} ${_prebuild_libs} ${_generic_libs}
${_lib}__L: .PHONY .MAKE
.if !defined(_MKSHOWCONFIG) && exists(${.CURDIR}/${_lib})
${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \
cd ${.CURDIR}/${_lib}; \
if [ -z "${NO_OBJWALK}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \
${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ all; \
${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ install
.endif
.endfor
_prereq_libs: ${_prereq_libs:S/$/__PL/}
_startup_libs: ${_startup_libs:S/$/__L/}
_prebuild_libs: ${_prebuild_libs:S/$/__L/}
_generic_libs: ${_generic_libs:S/$/__L/}
# Enable SUBDIR_PARALLEL when not calling 'make all', unless called from
# 'everything' with _PARALLEL_SUBDIR_OK set. This is because it is unlikely
# that running 'make all' from the top-level, especially with a SUBDIR_OVERRIDE
# or LOCAL_DIRS set, will have a reliable build if SUBDIRs are built in
# parallel. This is safe for the world stage of buildworld though since it has
# already built libraries in a proper order and installed includes into
# WORLDTMP. Special handling is done for SUBDIR ordering for 'install*' to
# avoid trashing a system if it crashes mid-install.
.if !make(all) || defined(_PARALLEL_SUBDIR_OK)
SUBDIR_PARALLEL=
.endif
.include <bsd.subdir.mk>
.if make(check-old) || make(check-old-dirs) || \
make(check-old-files) || make(check-old-libs) || \
make(delete-old) || make(delete-old-dirs) || \
make(delete-old-files) || make(delete-old-libs) || \
make(list-old-dirs) || make(list-old-files) || make(list-old-libs)
#
# check for / delete old files section
#
.include "ObsoleteFiles.inc"
OLD_LIBS_MESSAGE="Please be sure no application still uses those libraries, \
else you can not start such an application. Consult UPDATING for more \
information regarding how to cope with the removal/revision bump of a \
specific library."
.if !defined(BATCH_DELETE_OLD_FILES)
RM_I=-i
.else
RM_I=-fv
.endif
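# For example, to delete old files and libraries without being prompted for
# each one (RM_I becomes -fv):
#   make -DBATCH_DELETE_OLD_FILES delete-old delete-old-libs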
list-old-files: .PHONY
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \
-V "OLD_FILES:ts\n" -V "OLD_FILES:Musr/share/*.gz:R:ts\n" \
${_ALL_libcompats:@v@-V "OLD_FILES:Mlib/*.so.*:S,^lib,usr/lib$v,:ts\n"@} \
${_ALL_libcompats:@v@-V "OLD_FILES:Musr/lib/*:S,^usr/lib,usr/lib$v,:ts\n"@} | \
sort
delete-old-files: .PHONY
@echo ">>> Removing old files (only deletes safe to delete libs)"
# Ask for every old file if the user really wants to remove it.
# It's annoying, but better safe than sorry.
# NB: We cannot pass the list of OLD_FILES as a parameter because the
# argument list will get too long. Using .for/.endfor make "loops" will make
# the Makefile parser segfault.
@exec 3<&0; \
cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} list-old-files | \
while read file; do \
if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \
chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \
rm ${RM_I} "${DESTDIR}/$${file}" <&3; \
fi; \
for ext in debug symbols; do \
if ! [ -e "${DESTDIR}/$${file}" ] && [ -f \
"${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \
rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \
<&3; \
fi; \
done; \
done
# Remove catpages without corresponding manpages.
@exec 3<&0; \
find ${DESTDIR}/usr/share/man/cat* ! -type d 2>/dev/null | sort | \
sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \
while read catpage; do \
read manpage; \
if [ ! -e "$${manpage}" ]; then \
rm ${RM_I} $${catpage} <&3; \
fi; \
done
@echo ">>> Old files removed"
check-old-files: .PHONY
@echo ">>> Checking for old files"
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} list-old-files | \
while read file; do \
if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \
echo "${DESTDIR}/$${file}"; \
fi; \
for ext in debug symbols; do \
if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \
echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \
fi; \
done; \
done
# Check for catpages without corresponding manpages.
@find ${DESTDIR}/usr/share/man/cat* ! -type d 2>/dev/null | \
sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \
while read catpage; do \
read manpage; \
if [ ! -e "$${manpage}" ]; then \
echo $${catpage}; \
fi; \
done | sort
list-old-libs: .PHONY
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \
-V "OLD_LIBS:ts\n" -V "MOVED_LIBS:ts\n" \
${_ALL_libcompats:@v@-V "OLD_LIBS:Mlib/*:S,^lib,usr/lib$v,:ts\n"@} \
${_ALL_libcompats:@v@-V "OLD_LIBS:Musr/lib/*:S,^usr/lib,usr/lib$v,:ts\n"@} \
${_ALL_libcompats:@v@-V "OLD_LIBS:Mlib/casper/*:S,^lib/casper,usr/lib$v,:ts\n"@} | \
sort
delete-old-libs: .PHONY
@echo ">>> Removing old libraries"
@echo "${OLD_LIBS_MESSAGE}" | fmt
@exec 3<&0; \
cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} list-old-libs | \
while read file; do \
if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \
chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \
rm ${RM_I} "${DESTDIR}/$${file}" <&3; \
fi; \
for ext in debug symbols; do \
if ! [ -e "${DESTDIR}/$${file}" ] && [ -f \
"${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \
rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \
<&3; \
fi; \
done; \
done
@echo ">>> Old libraries removed"
check-old-libs: .PHONY
@echo ">>> Checking for old libraries"
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} list-old-libs | \
while read file; do \
if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \
echo "${DESTDIR}/$${file}"; \
fi; \
for ext in debug symbols; do \
if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \
echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \
fi; \
done; \
done
list-old-dirs: .PHONY
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \
-V OLD_DIRS | sed -E 's/[[:space:]]+/\n/g' | sort -r
delete-old-dirs: .PHONY
@echo ">>> Removing old directories"
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} list-old-dirs | \
while read dir; do \
if [ -d "${DESTDIR}/$${dir}" ]; then \
rmdir -v "${DESTDIR}/$${dir}" || true; \
elif [ -L "${DESTDIR}/$${dir}" ]; then \
echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \
fi; \
if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \
rmdir -v "${DESTDIR}${DEBUGDIR}/$${dir}" || true; \
elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \
echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \
fi; \
done
@echo ">>> Old directories removed"
check-old-dirs: .PHONY
@echo ">>> Checking for old directories"
@cd ${.CURDIR}; \
${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} list-old-dirs | \
while read dir; do \
if [ -d "${DESTDIR}/$${dir}" ]; then \
echo "${DESTDIR}/$${dir}"; \
elif [ -L "${DESTDIR}/$${dir}" ]; then \
echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \
fi; \
if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \
echo "${DESTDIR}${DEBUGDIR}/$${dir}"; \
elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \
echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \
fi; \
done
delete-old: delete-old-files delete-old-dirs .PHONY
@echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'."
check-old: check-old-files check-old-libs check-old-dirs .PHONY
@echo "To remove old files and directories run '${MAKE_CMD} delete-old'."
@echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'."
.endif
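# A typical pass over these targets after installworld (illustrative
# ordering; see UPDATING for the authoritative procedure):
#   make check-old       # list stale files, libraries and directories
#   make delete-old      # remove old files and directories
#   make delete-old-libs # remove old libraries once nothing uses them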
#
# showconfig - show build configuration.
#
# Lower-case knobs are ignored; they are not for use with WITH*.
#
showconfig: .PHONY
@(${MAKE} -n -f ${.CURDIR}/sys/conf/kern.opts.mk -V dummy -dg1 UPDATE_DEPENDFILE=no NO_OBJ=yes MACHINE=${TARGET} MACHINE_ARCH=${TARGET_ARCH}; \
${MAKE} -n -f ${.CURDIR}/share/mk/src.opts.mk -V dummy -dg1 UPDATE_DEPENDFILE=no NO_OBJ=yes) 2>&1 | grep '^MK_[A-Z]' | sort -u
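# Example output lines (illustrative only; the actual knob set depends on
# the source tree and configuration):
#   MK_CLANG=yes
#   MK_TESTS=no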
.if !empty(KRNLOBJDIR) && !empty(KERNCONF)
DTBOUTPUTPATH= ${KRNLOBJDIR}/${KERNCONF}/
.if !defined(FDT_DTS_FILE) || empty(FDT_DTS_FILE)
.if !defined(_MKSHOWCONFIG) && exists(${KERNCONFDIR}/${KERNCONF})
FDT_DTS_FILE!= awk 'BEGIN {FS="="} /^makeoptions[[:space:]]+FDT_DTS_FILE/ {print $$2}' \
'${KERNCONFDIR}/${KERNCONF}' ; echo
.endif
.endif
.endif
.if !defined(DTBOUTPUTPATH) || !exists(${DTBOUTPUTPATH})
DTBOUTPUTPATH= ${.CURDIR}
.endif
#
# Build 'standalone' Device Tree Blob
#
builddtb: .PHONY
@PATH=${TMPPATH:Q} MACHINE=${TARGET} \
sh ${.CURDIR}/sys/tools/fdt/make_dtb.sh ${.CURDIR}/sys \
"${FDT_DTS_FILE}" ${DTBOUTPUTPATH}
###############
# cleanworld
# In the following, the first 'rm' in a series will usually remove all
# files and directories. If it does not, then there are probably some
# files with file flags set, so this unsets them and tries the 'rm' a
# second time. There are situations where this target will be cleaning
# some directories via more than one method, but that duplication is
# needed to correctly handle all the possible situations. Removing all
# files without file flags set in the first 'rm' instance saves time,
# because 'chflags' will need to operate on fewer files afterwards.
#
# BW_CANONICALOBJDIR is expected to match the CANONICALOBJDIR that
# bsd.obj.mk would create; we simply don't want to .include that file in
# this makefile.  No cleandir walk is done when MK_AUTO_OBJ is yes, since
# files then cannot land in the wrong place.
#
.if make(cleanworld)
BW_CANONICALOBJDIR:=${OBJTOP}/
.elif make(cleankernel)
BW_CANONICALOBJDIR:=${KRNLOBJDIR}/${KERNCONF}/
.elif make(cleanuniverse)
BW_CANONICALOBJDIR:=${OBJROOT}
.if ${MK_UNIFIED_OBJDIR} == "no"
.error ${.TARGETS} is only supported with WITH_UNIFIED_OBJDIR enabled.
.endif
.endif
cleanworld cleanuniverse cleankernel: .PHONY
.if !empty(BW_CANONICALOBJDIR) && exists(${BW_CANONICALOBJDIR}) && \
${.CURDIR:tA} != ${BW_CANONICALOBJDIR:tA}
-(cd ${BW_CANONICALOBJDIR} && rm -rf *)
-chflags -R 0 ${BW_CANONICALOBJDIR}
-(cd ${BW_CANONICALOBJDIR} && rm -rf *)
.endif
.if make(cleanworld) && ${MK_AUTO_OBJ} == "no" && \
(empty(BW_CANONICALOBJDIR) || ${.CURDIR:tA} == ${BW_CANONICALOBJDIR:tA})
.if ${.CURDIR} == ${.OBJDIR} || ${.CURDIR}/obj == ${.OBJDIR}
# To be safe in this case, fall back to a 'make cleandir'
${_+_}@cd ${.CURDIR}; ${MAKE} cleandir
.endif
.endif
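# Example (assumed invocations):
#   make cleanworld                     # wipe ${OBJTOP}
#   make KERNCONF=GENERIC cleankernel   # wipe one kernel's object tree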
.if ${TARGET} == ${MACHINE} && ${TARGET_ARCH} == ${MACHINE_ARCH}
XDEV_CPUTYPE?=${CPUTYPE}
.else
XDEV_CPUTYPE?=${TARGET_CPUTYPE}
.endif
NOFUN=-DNO_FSCHG MK_HTML=no -DNO_LINT \
MK_MAN=no MK_NLS=no MK_PROFILE=no \
MK_KERBEROS=no MK_RESCUE=no MK_TESTS=no MK_WERROR=no \
TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \
CPUTYPE=${XDEV_CPUTYPE}
XDDIR=${TARGET_ARCH}-freebsd
XDTP?=/usr/${XDDIR}
.if ${XDTP:N/*}
.error XDTP variable should be an absolute path
.endif
CDBOBJROOT= ${OBJROOT}${MACHINE}.${MACHINE_ARCH}/xdev/
CDBOBJTOP= ${CDBOBJROOT}${XDDIR}
CDBENV= \
INSTALL="sh ${.CURDIR}/tools/install.sh"
CDENV= ${CDBENV} \
TOOLS_PREFIX=${XDTP}
CDMAKEARGS= \
OBJTOP=${CDBOBJTOP:Q} \
OBJROOT=${CDBOBJROOT:Q}
CD2MAKEARGS= ${CDMAKEARGS}
.if ${WANT_COMPILER_TYPE} == gcc || \
(defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc)
# GCC requires -isystem and -L when using a cross-compiler.  --sysroot
# alone does not set the header search path, and -L ensures the base
# library path is added before the port PREFIX library path.
CD2CFLAGS+= -isystem ${XDDESTDIR}/usr/include -L${XDDESTDIR}/usr/lib
# GCC requires -B to find /usr/lib/crti.o when using a cross-compiler
# combined with --sysroot.
CD2CFLAGS+= -B${XDDESTDIR}/usr/lib
# Force using libc++ for external GCC.
.if defined(X_COMPILER_TYPE) && \
${X_COMPILER_TYPE} == gcc && ${X_COMPILER_VERSION} >= 40800
CD2CXXFLAGS+= -isystem ${XDDESTDIR}/usr/include/c++/v1 -std=c++11 \
-nostdinc++
.endif
.endif
CD2CFLAGS+= --sysroot=${XDDESTDIR}/
CD2ENV=${CDENV} CC="${CC} ${CD2CFLAGS}" CXX="${CXX} ${CD2CXXFLAGS} ${CD2CFLAGS}" \
CPP="${CPP} ${CD2CFLAGS}" \
MACHINE=${TARGET} MACHINE_ARCH=${TARGET_ARCH}
CDTMP= ${OBJTOP}/${XDDIR}/tmp
CDMAKE=${CDENV} PATH=${CDTMP:Q}/usr/bin:${PATH:Q} ${MAKE} ${CDMAKEARGS} ${NOFUN}
CD2MAKE=${CD2ENV} PATH=${CDTMP:Q}/usr/bin:${XDDESTDIR:Q}/usr/bin:${PATH:Q} \
${MAKE} ${CD2MAKEARGS} ${NOFUN}
.if ${MK_META_MODE} != "no"
# Don't rebuild build-tools targets during normal build.
CD2MAKE+= BUILD_TOOLS_META=.NOMETA
.endif
XDDESTDIR=${DESTDIR}${XDTP}
.ORDER: xdev-build xdev-install xdev-links
xdev: xdev-build xdev-install .PHONY
.ORDER: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools
xdev-build: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools .PHONY
_xb-worldtmp: .PHONY
mkdir -p ${CDTMP}/usr
${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \
-p ${CDTMP}/usr >/dev/null
_xb-bootstrap-tools: .PHONY
.for _tool in \
${_clang_tblgen} \
${_yacc}
${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \
cd ${.CURDIR}/${_tool}; \
if [ -z "${NO_OBJWALK}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \
${CDMAKE} DIRPRFX=${_tool}/ all; \
${CDMAKE} DIRPRFX=${_tool}/ DESTDIR=${CDTMP} install
.endfor
_xb-build-tools: .PHONY
${_+_}@cd ${.CURDIR}; \
${CDBENV} ${MAKE} ${CDMAKEARGS} -f Makefile.inc1 ${NOFUN} build-tools
XDEVDIRS= \
${_clang_libs} \
${_lld} \
${_elftctools} \
usr.bin/ar \
${_clang}
_xb-cross-tools: .PHONY
.for _tool in ${XDEVDIRS}
${_+_}@${ECHODIR} "===> xdev ${_tool} (obj,all)"; \
cd ${.CURDIR}/${_tool}; \
if [ -z "${NO_OBJWALK}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \
${CDMAKE} DIRPRFX=${_tool}/ all
.endfor
_xi-mtree: .PHONY
${_+_}@${ECHODIR} "mtree populating ${XDDESTDIR}"
mkdir -p ${XDDESTDIR}
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.root.dist \
-p ${XDDESTDIR} >/dev/null
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \
-p ${XDDESTDIR}/usr >/dev/null
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \
-p ${XDDESTDIR}/usr/include >/dev/null
.for d in ${LIBCOMPAT_INCLUDE_DIRS}
mkdir -p ${XDDESTDIR}/usr/include/${d}
.endfor
.for libcompat in ${libcompats}
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \
-p ${XDDESTDIR}/usr >/dev/null
.endfor
.if ${MK_TESTS} != "no"
mkdir -p ${XDDESTDIR}${TESTSBASE}
${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.tests.dist \
-p ${XDDESTDIR}${TESTSBASE} >/dev/null
.endif
.ORDER: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries
xdev-install: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries .PHONY
_xi-cross-tools: .PHONY
@echo "_xi-cross-tools"
.for _tool in ${XDEVDIRS}
${_+_}@${ECHODIR} "===> xdev ${_tool} (install)"; \
cd ${.CURDIR}/${_tool}; \
${CDMAKE} DIRPRFX=${_tool}/ install DESTDIR=${XDDESTDIR}
.endfor
_xi-includes: .PHONY
.if !defined(NO_OBJWALK)
${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 _obj \
DESTDIR=${XDDESTDIR}
.endif
${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 includes \
DESTDIR=${XDDESTDIR}
_xi-libraries: .PHONY
${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 libraries \
DESTDIR=${XDDESTDIR}
xdev-links: .PHONY
${_+_}cd ${XDDESTDIR}/usr/bin; \
mkdir -p ../../../../usr/bin; \
for i in *; do \
ln -sf ../../${XDTP}/usr/bin/$$i \
../../../../usr/bin/${XDDIR}-$$i; \
ln -sf ../../${XDTP}/usr/bin/$$i \
../../../../usr/bin/${XDDIR}${_REVISION}-$$i; \
done
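# Example (assumed invocation): build and install an aarch64 cross toolchain
# under ${DESTDIR}${XDTP}, then create the ${XDDIR}-prefixed links:
#   make TARGET=arm64 TARGET_ARCH=aarch64 xdev xdev-links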
diff --git a/ObsoleteFiles.inc b/ObsoleteFiles.inc
index 655a81cd900f..e8f6e2e1693c 100644
--- a/ObsoleteFiles.inc
+++ b/ObsoleteFiles.inc
@@ -1,17895 +1,17898 @@
#
#
# This file lists old files (OLD_FILES), libraries (OLD_LIBS, MOVED_LIBS)
# and directories (OLD_DIRS) which should get removed after an update.
# Recently removed entries should be listed first (with the date as a
# comment). OLD_LIBS and MOVED_LIBS should only list dynamic libraries.
# Static libraries, links to dynamic libraries (lib*.so), and linker scripts
# should be listed in OLD_FILES. OLD_LIBS and MOVED_LIBS are removed by the
# delete-old-libs target, whereas OLD_FILES and OLD_DIRS are removed by the
# delete-old target. This separation allows users to avoid deleting old
# dynamic libraries still required by existing binaries.
#
# MOVED_LIBS should be used instead of OLD_LIBS when a library is moved
# from usr/lib to lib or vice versa. This avoids removing libraries for
# alternate ABIs (such as lib32) which store all libraries in a single
# directory (e.g. usr/lib32).
#
# For files listed in OLD_FILES, OLD_LIBS, and MOVED_LIBS, the check-old*
# and delete-old* targets will also delete associated debug symbols from
# usr/lib/debug.
#
# When a complete directory hierarchy is removed, the entries are sorted
# in depth-first order.
#
# Files that are installed or removed depending on some build option
# should be listed in /usr/src/tools/build/mk/OptionalObsoleteFiles.inc
# instead of in this file.
#
# Before committing changes to this file, please check whether any entries
# in tools/build/mk/OptionalObsoleteFiles.inc can be removed.  The following
# command reports files that are listed more than once, but it ignores
# architecture-specific conditionals, so its output cannot be trusted
# blindly:
# ( grep '+=' /usr/src/ObsoleteFiles.inc | sort -u ; \
# grep '+=' /usr/src/tools/build/mk/OptionalObsoleteFiles.inc | sort -u) | \
# sort | uniq -d
#
# To find regular duplicates that do not depend on optional components,
# you can instead use the following, which gives no false positives:
# for t in `make -V TARGETS universe`; do
# __MAKE_CONF=/dev/null make -f Makefile.inc1 TARGET=$t \
# -V OLD_FILES -V OLD_LIBS -V MOVED_LIBS -V OLD_DIRS check-old | \
# xargs -n1 | sort | uniq -d;
# done
#
# For optional components, you can use the following to see whether some
# entries in OptionalObsoleteFiles.inc have been obsoleted by ObsoleteFiles.inc:
# for o in tools/build/options/WITH*; do
# __MAKE_CONF=/dev/null make -f Makefile.inc1 -D${o##*/} \
# -V OLD_FILES -V OLD_LIBS -V MOVED_LIBS -V OLD_DIRS check-old | \
# xargs -n1 | sort | uniq -d;
# done
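#
# An illustrative (hypothetical) entry, showing the expected layout:
# # 20991231: libfoo shlib version bumped to 3
# OLD_LIBS+=usr/lib/libfoo.so.2
# OLD_FILES+=usr/lib/libfoo.a
# OLD_FILES+=usr/share/man/man3/libfoo.3.gz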
+# 20230902: libzfs now requires librt, moved to /lib
+MOVED_LIBS+=usr/lib/librt.so.1
+
# 20230807: GoogleTest 1.14.0 upgrade.
OLD_FILES+=usr/include/private/gmock/gmock-generated-actions.h
OLD_FILES+=usr/include/private/gmock/gmock-generated-function-mockers.h
OLD_FILES+=usr/include/private/gmock/gmock-generated-matchers.h
OLD_FILES+=usr/include/private/gmock/gmock-generated-nice-strict.h
OLD_FILES+=usr/include/private/gmock/internal/gmock-generated-internal-utils.h
OLD_FILES+=usr/include/private/gtest/internal/gtest-linked_ptr.h
OLD_FILES+=usr/include/private/gtest/internal/gtest-param-util-generated.h
OLD_FILES+=usr/include/private/gtest/internal/gtest-tuple.h
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-actions_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-function-mockers_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-internal-utils_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-matchers_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-matchers_test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-linked-ptr-test
# 20230807: Removal of the ath(4) AHB bus-frontend for MIPS
OLD_FILES+=usr/share/man/man4/ath_ahb.4
OLD_FILES+=usr/share/man/man4/ath_pci.4
OLD_FILES+=usr/share/man/man4/if_ath_pci.4
# 20230803: Removal of support for cloning pseudo interfaces from iflib(9)
OLD_FILES+=usr/include/net/iflib_private.h
# 20230802: Remove a copy of libdtrace.so installed to the wrong path
MOVED_LIBS+=usr/lib/libdtrace.so.2
# 20230726: Removal of support for the VTOC8 partitioning scheme
OLD_FILES+=usr/include/sys/disk/vtoc.h
OLD_FILES+=usr/include/sys/vtoc.h
# 20230724: Remove unneeded arm headers
.if ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/machine/atomic-v6.h
OLD_FILES+=usr/include/machine/cpu-v6.h
OLD_FILES+=usr/include/machine/pmap-v6.h
OLD_FILES+=usr/include/machine/pte-v6.h
OLD_FILES+=usr/include/machine/swi.h
.endif
# 20230714: gsignal
OLD_FILES+=usr/share/man/man9/gsignal.9.gz
# 20230626: Only install sys/dev/nvme/nvme.h to /usr/include
OLD_FILES+=usr/include/dev/nvme/nvme_private.h
# 20230623: OpenSSL 3.0.9
OLD_LIBS+=lib/libcrypto.so.111
OLD_FILES+=usr/include/openssl/rand_drbg.h
OLD_LIBS+=usr/lib/engines/capi.so
OLD_LIBS+=usr/lib/engines/padlock.so
OLD_LIBS+=usr/lib/libssl.so.111
OLD_DIRS+=usr/lib/engines
.for libcompat in ${_ALL_libcompats}
OLD_DIRS+=usr/lib${libcompat}/engines
.endfor
OLD_FILES+=usr/share/man/man1/list.1
OLD_FILES+=usr/share/man/man1/openssl-tsget.1
OLD_FILES+=usr/share/man/man3/ECDH_get_ex_data.3
OLD_FILES+=usr/share/man/man3/ECDH_get_ex_new_index.3
OLD_FILES+=usr/share/man/man3/ECDH_set_ex_data.3
OLD_FILES+=usr/share/man/man3/ERR_GET_FUNC.3
OLD_FILES+=usr/share/man/man3/EVP_PKEY_CTX_hkdf_mode.3
OLD_FILES+=usr/share/man/man3/EVP_PKEY_set_alias_type.3
OLD_FILES+=usr/share/man/man3/EVP_aes.3
OLD_FILES+=usr/share/man/man3/EVP_aria.3
OLD_FILES+=usr/share/man/man3/EVP_camellia.3
OLD_FILES+=usr/share/man/man3/EVP_des.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_bytes.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_cleanup_entropy_fn.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_cleanup_nonce_fn.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_free.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_generate.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get0_master.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get0_private.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get0_public.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get_entropy_fn.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get_ex_data.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get_ex_new_index.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_get_nonce_fn.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_instantiate.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_new.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_reseed.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_secure_new.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set_callbacks.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set_defaults.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set_ex_data.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set_reseed_defaults.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set_reseed_interval.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_set_reseed_time_interval.3
OLD_FILES+=usr/share/man/man3/RAND_DRBG_uninstantiate.3
OLD_FILES+=usr/share/man/man3/RSA_padding_add_SSLv23.3
OLD_FILES+=usr/share/man/man3/RSA_padding_check_SSLv23.3
OLD_FILES+=usr/share/man/man7/RAND_DRBG.7
OLD_FILES+=usr/share/man/man7/scrypt.7
# 20230622: new clang import which bumps version from 15.0.7 to 16.0.1
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_cuda_texture_intrinsics.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/15.0.7/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/15.0.7/include/adxintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/altivec.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ammintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/amxintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm64intr.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_acle.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_cde.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_mve.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_neon.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_neon_sve_bridge.h
OLD_FILES+=usr/lib/clang/15.0.7/include/arm_sve.h
OLD_FILES+=usr/lib/clang/15.0.7/include/armintr.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512fp16intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlfp16intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avxintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/builtins.h
OLD_FILES+=usr/lib/clang/15.0.7/include/cet.h
OLD_FILES+=usr/lib/clang/15.0.7/include/cetintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/cpuid.h
OLD_FILES+=usr/lib/clang/15.0.7/include/crc32intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/15.0.7/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/15.0.7/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/15.0.7/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/15.0.7/include/emmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/float.h
OLD_FILES+=usr/lib/clang/15.0.7/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/15.0.7/include/fuzzer
OLD_FILES+=usr/lib/clang/15.0.7/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hexagon_circ_brev_intrinsics.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hexagon_protos.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hexagon_types.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hlsl/hlsl_basic_types.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hlsl/hlsl_intrinsics.h
OLD_DIRS+=usr/lib/clang/15.0.7/include/hlsl
OLD_FILES+=usr/lib/clang/15.0.7/include/hlsl.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/htmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/hvx_hexagon_protos.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/immintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/inttypes.h
OLD_FILES+=usr/lib/clang/15.0.7/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/iso646.h
OLD_FILES+=usr/lib/clang/15.0.7/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/limits.h
OLD_FILES+=usr/lib/clang/15.0.7/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/15.0.7/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/15.0.7/include/mmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/module.modulemap
OLD_FILES+=usr/lib/clang/15.0.7/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/msa.h
OLD_FILES+=usr/lib/clang/15.0.7/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/omp-tools.h
OLD_FILES+=usr/lib/clang/15.0.7/include/omp.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ompt.h
OLD_FILES+=usr/lib/clang/15.0.7/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/15.0.7/include/opencl-c.h
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/15.0.7/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/15.0.7/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/15.0.7/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/bmi2intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/bmiintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/immintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/x86gprintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/x86intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/15.0.7/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/15.0.7/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/profile/InstrProfData.inc
OLD_FILES+=usr/lib/clang/15.0.7/include/profile/MemProfData.inc
OLD_DIRS+=usr/lib/clang/15.0.7/include/profile
OLD_FILES+=usr/lib/clang/15.0.7/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/rdpruintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/riscv_vector.h
OLD_FILES+=usr/lib/clang/15.0.7/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/s390intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/memprof_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/15.0.7/include/sanitizer
OLD_FILES+=usr/lib/clang/15.0.7/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/shaintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/smmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stdalign.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stdarg.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stdatomic.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stdbool.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stddef.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stdint.h
OLD_FILES+=usr/lib/clang/15.0.7/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/15.0.7/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/tgmath.h
OLD_FILES+=usr/lib/clang/15.0.7/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/unwind.h
OLD_FILES+=usr/lib/clang/15.0.7/include/vadefs.h
OLD_FILES+=usr/lib/clang/15.0.7/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/varargs.h
OLD_FILES+=usr/lib/clang/15.0.7/include/vecintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/velintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/velintrin_approx.h
OLD_FILES+=usr/lib/clang/15.0.7/include/velintrin_gen.h
OLD_FILES+=usr/lib/clang/15.0.7/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/15.0.7/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/x86intrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xopintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/15.0.7/include/xray
OLD_FILES+=usr/lib/clang/15.0.7/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/15.0.7/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/15.0.7/include
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_static-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.asan_static-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.fuzzer_interceptors-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/15.0.7/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/15.0.7/lib/freebsd
OLD_DIRS+=usr/lib/clang/15.0.7/lib
OLD_FILES+=usr/lib/clang/15.0.7/share/asan_ignorelist.txt
OLD_FILES+=usr/lib/clang/15.0.7/share/cfi_ignorelist.txt
OLD_FILES+=usr/lib/clang/15.0.7/share/msan_ignorelist.txt
OLD_DIRS+=usr/lib/clang/15.0.7/share
OLD_DIRS+=usr/lib/clang/15.0.7
# 20230622: new libc++ import which bumps version from 15.0.7 to 16.0.1
OLD_FILES+=usr/include/c++/v1/__bits
OLD_FILES+=usr/include/c++/v1/__tuple
OLD_FILES+=usr/include/c++/v1/__utility/transaction.h
# 20230614: caroot bundle updated
OLD_FILES+=usr/share/certs/trusted/Cybertrust_Global_Root.pem
OLD_FILES+=usr/share/certs/trusted/DST_Root_CA_X3.pem
OLD_FILES+=usr/share/certs/trusted/GlobalSign_Root_CA_-_R2.pem
OLD_FILES+=usr/share/certs/trusted/Hellenic_Academic_and_Research_Institutions_RootCA_2011.pem
OLD_FILES+=usr/share/certs/trusted/Network_Solutions_Certificate_Authority.pem
OLD_FILES+=usr/share/certs/trusted/Staat_der_Nederlanden_EV_Root_CA.pem
OLD_FILES+=usr/share/certs/trusted/TrustCor_ECA-1.pem
OLD_FILES+=usr/share/certs/trusted/TrustCor_RootCert_CA-1.pem
OLD_FILES+=usr/share/certs/trusted/TrustCor_RootCert_CA-2.pem
# 20230505: md5 tests are now self-contained
OLD_FILES+=usr/tests/sbin/md5/1.inp
OLD_FILES+=usr/tests/sbin/md5/1.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/1.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/2.inp
OLD_FILES+=usr/tests/sbin/md5/2.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/2.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/3.inp
OLD_FILES+=usr/tests/sbin/md5/3.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/3.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/4.inp
OLD_FILES+=usr/tests/sbin/md5/4.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/4.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/5.inp
OLD_FILES+=usr/tests/sbin/md5/5.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/5.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/6.inp
OLD_FILES+=usr/tests/sbin/md5/6.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/6.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/7.inp
OLD_FILES+=usr/tests/sbin/md5/7.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/7.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/8.inp
OLD_FILES+=usr/tests/sbin/md5/8.sha512-p.chk
OLD_FILES+=usr/tests/sbin/md5/8.sha512sum-p.chk
OLD_FILES+=usr/tests/sbin/md5/algorithms.txt
OLD_FILES+=usr/tests/sbin/md5/bsd-c-test
OLD_FILES+=usr/tests/sbin/md5/bsd-p-test
OLD_FILES+=usr/tests/sbin/md5/bsd-s-test
OLD_FILES+=usr/tests/sbin/md5/coreutils-c-test
OLD_FILES+=usr/tests/sbin/md5/md5.digest
OLD_FILES+=usr/tests/sbin/md5/md5sum.digest
OLD_FILES+=usr/tests/sbin/md5/rmd160.digest
OLD_FILES+=usr/tests/sbin/md5/rmd160sum.digest
OLD_FILES+=usr/tests/sbin/md5/self-test
OLD_FILES+=usr/tests/sbin/md5/self-test.md5.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.rmd160.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sh_inp
OLD_FILES+=usr/tests/sbin/md5/self-test.sha1.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sha224.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sha256.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sha384.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sha512.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sha512t224.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.sha512t256.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.skein1024.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.skein256.chk
OLD_FILES+=usr/tests/sbin/md5/self-test.skein512.chk
OLD_FILES+=usr/tests/sbin/md5/sha1.digest
OLD_FILES+=usr/tests/sbin/md5/sha1sum.digest
OLD_FILES+=usr/tests/sbin/md5/sha224.digest
OLD_FILES+=usr/tests/sbin/md5/sha224sum.digest
OLD_FILES+=usr/tests/sbin/md5/sha256.digest
OLD_FILES+=usr/tests/sbin/md5/sha256sum.digest
OLD_FILES+=usr/tests/sbin/md5/sha384.digest
OLD_FILES+=usr/tests/sbin/md5/sha384sum.digest
OLD_FILES+=usr/tests/sbin/md5/sha512.digest
OLD_FILES+=usr/tests/sbin/md5/sha512sum.digest
OLD_FILES+=usr/tests/sbin/md5/sha512t224.digest
OLD_FILES+=usr/tests/sbin/md5/sha512t224sum.digest
OLD_FILES+=usr/tests/sbin/md5/sha512t256.digest
OLD_FILES+=usr/tests/sbin/md5/sha512t256sum.digest
OLD_FILES+=usr/tests/sbin/md5/skein1024.digest
OLD_FILES+=usr/tests/sbin/md5/skein1024sum.digest
OLD_FILES+=usr/tests/sbin/md5/skein256.digest
OLD_FILES+=usr/tests/sbin/md5/skein256sum.digest
OLD_FILES+=usr/tests/sbin/md5/skein512.digest
OLD_FILES+=usr/tests/sbin/md5/skein512sum.digest
OLD_FILES+=usr/tests/sbin/md5/sum_a.in
OLD_FILES+=usr/tests/sbin/md5/sum_b.in
OLD_FILES+=usr/tests/sbin/md5/sum_c.in
OLD_FILES+=usr/tests/sbin/md5/sum_sums.digest
# 20230420: portsnap.8 removed
OLD_FILES+=etc/portsnap.conf
OLD_FILES+=usr/libexec/make_index
OLD_FILES+=usr/sbin/portsnap
OLD_FILES+=usr/share/examples/etc/portsnap.conf
OLD_FILES+=usr/share/man/man8/portsnap.8.gz
# 20230331: libpcap updated to 1.10.3
OLD_FILES+=usr/include/fmtutils.h
# 20230329: libcasper libraries moved to /lib
MOVED_LIBS+=lib/casper/libcap_dns.so.2
MOVED_LIBS+=lib/casper/libcap_fileargs.so.1
MOVED_LIBS+=lib/casper/libcap_grp.so.1
MOVED_LIBS+=lib/casper/libcap_net.so.1
MOVED_LIBS+=lib/casper/libcap_netdb.so.1
MOVED_LIBS+=lib/casper/libcap_pwd.so.1
MOVED_LIBS+=lib/casper/libcap_sysctl.so.1
MOVED_LIBS+=lib/casper/libcap_sysctl.so.2
MOVED_LIBS+=lib/casper/libcap_syslog.so.1
OLD_DIRS+=lib/casper
# 20230324: libvmmapi shlib version bumped to 6
OLD_LIBS+=usr/lib/libvmmapi.so.5
# 20230323: tzdata 2023a imported
OLD_FILES+=usr/share/zoneinfo/posixrules
# 20230320: vcount.9 removed
OLD_FILES+=usr/share/man/man9/count_dev.9.gz
OLD_FILES+=usr/share/man/man9/vcount.9.gz
# 20230320: case-sensitive conflicts removed
OLD_FILES+=usr/tests/bin/pkill/pgrep-F_test
OLD_FILES+=usr/tests/bin/pkill/pgrep-LF_test
OLD_FILES+=usr/tests/bin/pkill/pgrep-P_test
OLD_FILES+=usr/tests/bin/pkill/pgrep-U_test
OLD_FILES+=usr/tests/bin/pkill/pkill-F_test
OLD_FILES+=usr/tests/bin/pkill/pkill-LF_test
OLD_FILES+=usr/tests/bin/pkill/pkill-P_test
OLD_FILES+=usr/tests/bin/pkill/pkill-U_test
# 20230309: remove remaining ATM support
OLD_FILES+=usr/bin/sscop
OLD_FILES+=usr/include/netgraph/atm/ng_ccatm.h
OLD_FILES+=usr/include/netgraph/atm/ng_sscfu.h
OLD_FILES+=usr/include/netgraph/atm/ng_sscop.h
OLD_FILES+=usr/include/netgraph/atm/ng_uni.h
OLD_FILES+=usr/include/netgraph/atm/ngatmbase.h
OLD_FILES+=usr/include/netgraph/ng_atmllc.h
OLD_FILES+=usr/include/netnatm/addr.h
OLD_FILES+=usr/include/netnatm/api/atmapi.h
OLD_FILES+=usr/include/netnatm/api/ccatm.h
OLD_FILES+=usr/include/netnatm/api/unisap.h
OLD_DIRS+=usr/include/netnatm/api
OLD_FILES+=usr/include/netnatm/msg/uni_config.h
OLD_FILES+=usr/include/netnatm/msg/uni_hdr.h
OLD_FILES+=usr/include/netnatm/msg/uni_ie.h
OLD_FILES+=usr/include/netnatm/msg/uni_msg.h
OLD_FILES+=usr/include/netnatm/msg/unimsglib.h
OLD_FILES+=usr/include/netnatm/msg/uniprint.h
OLD_FILES+=usr/include/netnatm/msg/unistruct.h
OLD_DIRS+=usr/include/netnatm/msg
OLD_FILES+=usr/include/netnatm/saal/sscfu.h
OLD_FILES+=usr/include/netnatm/saal/sscfudef.h
OLD_FILES+=usr/include/netnatm/saal/sscop.h
OLD_FILES+=usr/include/netnatm/saal/sscopdef.h
OLD_DIRS+=usr/include/netnatm/saal
OLD_FILES+=usr/include/netnatm/sig/uni.h
OLD_FILES+=usr/include/netnatm/sig/unidef.h
OLD_FILES+=usr/include/netnatm/sig/unisig.h
OLD_DIRS+=usr/include/netnatm/sig
OLD_FILES+=usr/include/netnatm/unimsg.h
OLD_DIRS+=usr/include/netnatm
OLD_FILES+=usr/lib/libngatm.a
OLD_FILES+=usr/lib/libngatm.so
OLD_LIBS+=usr/lib/libngatm.so.4
OLD_FILES+=usr/lib/libngatm_p.a
OLD_FILES+=usr/share/man/man1/sscop.1.gz
OLD_FILES+=usr/share/man/man3/libngatm.3.gz
OLD_FILES+=usr/share/man/man3/uniaddr.3.gz
OLD_FILES+=usr/share/man/man3/unifunc.3.gz
OLD_FILES+=usr/share/man/man3/unimsg.3.gz
OLD_FILES+=usr/share/man/man3/unisap.3.gz
OLD_FILES+=usr/share/man/man3/unistruct.3.gz
OLD_FILES+=usr/share/man/man4/ng_atmllc.4.gz
OLD_FILES+=usr/share/man/man4/ng_ccatm.4.gz
OLD_FILES+=usr/share/man/man4/ng_sscfu.4.gz
OLD_FILES+=usr/share/man/man4/ng_sscop.4.gz
OLD_FILES+=usr/share/man/man4/ng_uni.4.gz
OLD_FILES+=usr/share/man/man4/ngatmbase.4.gz
# 20230308: machine-id merged into hostid_save
OLD_FILES+=etc/rc.d/machine-id
# 20230306: remove tzsetwall(3)
OLD_FILES+=usr/share/man/man3/tzsetwall.3.gz
# 20230208: new clang import which bumps version from 14.0.5 to 15.0.7
OLD_FILES+=usr/lib/clang/14.0.5/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/14.0.5/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/14.0.5/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/14.0.5/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/14.0.5/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/14.0.5/include/fuzzer
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/14.0.5/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/14.0.5/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/14.0.5/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/14.0.5/include/profile/InstrProfData.inc
OLD_FILES+=usr/lib/clang/14.0.5/include/profile/MemProfData.inc
OLD_DIRS+=usr/lib/clang/14.0.5/include/profile
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/memprof_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/14.0.5/include/sanitizer
OLD_FILES+=usr/lib/clang/14.0.5/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/14.0.5/include/xray
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_cuda_texture_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/14.0.5/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/14.0.5/include/adxintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/altivec.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ammintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/amxintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm64intr.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_acle.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_cde.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_mve.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_neon.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_neon_sve_bridge.h
OLD_FILES+=usr/lib/clang/14.0.5/include/arm_sve.h
OLD_FILES+=usr/lib/clang/14.0.5/include/armintr.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512fp16intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlfp16intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avxintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/builtins.h
OLD_FILES+=usr/lib/clang/14.0.5/include/cet.h
OLD_FILES+=usr/lib/clang/14.0.5/include/cetintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/cpuid.h
OLD_FILES+=usr/lib/clang/14.0.5/include/crc32intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/emmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/float.h
OLD_FILES+=usr/lib/clang/14.0.5/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/hexagon_circ_brev_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.5/include/hexagon_protos.h
OLD_FILES+=usr/lib/clang/14.0.5/include/hexagon_types.h
OLD_FILES+=usr/lib/clang/14.0.5/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/htmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/hvx_hexagon_protos.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/immintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/inttypes.h
OLD_FILES+=usr/lib/clang/14.0.5/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/iso646.h
OLD_FILES+=usr/lib/clang/14.0.5/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/limits.h
OLD_FILES+=usr/lib/clang/14.0.5/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/14.0.5/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/14.0.5/include/mmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/module.modulemap
OLD_FILES+=usr/lib/clang/14.0.5/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/msa.h
OLD_FILES+=usr/lib/clang/14.0.5/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/omp-tools.h
OLD_FILES+=usr/lib/clang/14.0.5/include/omp.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ompt.h
OLD_FILES+=usr/lib/clang/14.0.5/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/14.0.5/include/opencl-c.h
OLD_FILES+=usr/lib/clang/14.0.5/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/riscv_vector.h
OLD_FILES+=usr/lib/clang/14.0.5/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/s390intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/shaintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/smmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stdalign.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stdarg.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stdatomic.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stdbool.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stddef.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stdint.h
OLD_FILES+=usr/lib/clang/14.0.5/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/14.0.5/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/tgmath.h
OLD_FILES+=usr/lib/clang/14.0.5/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/unwind.h
OLD_FILES+=usr/lib/clang/14.0.5/include/vadefs.h
OLD_FILES+=usr/lib/clang/14.0.5/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/varargs.h
OLD_FILES+=usr/lib/clang/14.0.5/include/vecintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/14.0.5/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/x86intrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xopintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/14.0.5/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/14.0.5/include
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-powerpc64.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-powerpc64le.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_static-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_static-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_static-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.asan_static-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.fuzzer_interceptors-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan_cxx-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan_cxx-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan_cxx-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan_cxx-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-basic-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-fdr-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-profiling-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.5/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/14.0.5/lib/freebsd
OLD_DIRS+=usr/lib/clang/14.0.5/lib
OLD_FILES+=usr/lib/clang/14.0.5/share/asan_ignorelist.txt
OLD_FILES+=usr/lib/clang/14.0.5/share/cfi_ignorelist.txt
OLD_FILES+=usr/lib/clang/14.0.5/share/msan_ignorelist.txt
OLD_DIRS+=usr/lib/clang/14.0.5/share
OLD_DIRS+=usr/lib/clang/14.0.5
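# Note: the OLD_FILES/OLD_LIBS/OLD_DIRS entries accumulated in this file feed
# the stale-file cleanup targets. As an illustrative sketch (assuming an
# already completed installworld from this source tree), an upgrade pass
# might run:
#
#   cd /usr/src
#   make check-old        # report stale files, libraries and directories
#   make delete-old       # remove OLD_FILES entries and prune OLD_DIRS
#   make delete-old-libs  # remove OLD_LIBS once nothing still links to them
#
# Setting BATCH_DELETE_OLD_FILES=yes skips the per-file confirmation prompts.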
# 20230208: new libc++ import which bumps version from 14.0.5 to 15.0.7
OLD_FILES+=usr/include/c++/v1/__functional_base
OLD_FILES+=usr/include/c++/v1/__libcpp_version
OLD_FILES+=usr/include/c++/v1/__nullptr
OLD_FILES+=usr/include/c++/v1/__string
OLD_FILES+=usr/include/c++/v1/experimental/filesystem
# 20230203: loader help files renamed
OLD_FILES+=boot/loader.help
# 20230201: timeout moved from /usr/bin to /bin
OLD_FILES+=usr/tests/usr.bin/timeout/Kyuafile
OLD_FILES+=usr/tests/usr.bin/timeout/timeout_test
# 20230124: rtalloc.9 removed
OLD_FILES+=usr/share/man/man9/rtalloc.9.gz
OLD_FILES+=usr/share/man/man9/rtalloc_fib.9.gz
OLD_FILES+=usr/share/man/man9/rtalloc_ign.9.gz
OLD_FILES+=usr/share/man/man9/rtalloc_ign_fib.9.gz
OLD_FILES+=usr/share/man/man9/rtalloc1.9.gz
OLD_FILES+=usr/share/man/man9/rtalloc1_fib.9.gz
OLD_FILES+=usr/share/man/man9/rtfree.9.gz
OLD_FILES+=usr/share/man/man9/RT_ADDREF.9.gz
OLD_FILES+=usr/share/man/man9/RT_LOCK.9.gz
OLD_FILES+=usr/share/man/man9/RT_REMREF.9.gz
OLD_FILES+=usr/share/man/man9/RT_RTFREE.9.gz
OLD_FILES+=usr/share/man/man9/RT_UNLOCK.9.gz
OLD_FILES+=usr/share/man/man9/RTFREE.9.gz
OLD_FILES+=usr/share/man/man9/RTFREE_LOCKED.9.gz
# 20230123: PCBGROUP.9 removed
OLD_FILES+=usr/share/man/man9/PCBGROUP.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_byhash.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_byinpcb.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_destroy.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_enabled.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_init.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_remove.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_update.9.gz
OLD_FILES+=usr/share/man/man9/in_pcbgroup_update_mbuf.9.gz
OLD_FILES+=usr/share/man/man9/in6_pcbgroup_byhash.9.gz
# 20221214: TCPDEBUG removed
OLD_FILES+=usr/include/netinet/tcp_debug.h
# 20221213: remove sync serial drivers and utilities
OLD_FILES+=sbin/sconfig
OLD_FILES+=usr/share/man/man4/ce.4.gz
OLD_FILES+=usr/share/man/man4/cp.4.gz
OLD_FILES+=usr/share/man/man8/sconfig.8.gz
# 20221202: remove trpt(8)
OLD_FILES+=usr/sbin/trpt
OLD_FILES+=usr/share/man/man8/trpt.8.gz
# 20221117: remove typo'd man page link
OLD_FILES+=usr/share/man/man9/vm_map_wire_mapped.9.gz
# 20221114: remove othermta
OLD_FILES+=etc/rc.d/othermta
# 20221109: remove rc.sendmail(8)
OLD_FILES+=etc/rc.sendmail
OLD_FILES+=usr/share/man/man8/rc.sendmail.8.gz
# 20221015: update the ithread(9) man page
OLD_FILES+=usr/share/man/man9/ithread.9.gz
OLD_FILES+=usr/share/man/man9/ithread_add_handler.9.gz
OLD_FILES+=usr/share/man/man9/ithread_create.9.gz
OLD_FILES+=usr/share/man/man9/ithread_destroy.9.gz
OLD_FILES+=usr/share/man/man9/ithread_priority.9.gz
OLD_FILES+=usr/share/man/man9/ithread_remove_handler.9.gz
OLD_FILES+=usr/share/man/man9/ithread_schedule.9.gz
# 20221012: remove nls support from sort
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/sort.cat
# 20221003: ip6protosw.h removed
OLD_FILES+=usr/include/netinet6/ip6protosw.h
# 20221001: deorbit opie
OLD_FILES+=etc/opieaccess
OLD_FILES+=etc/opiekeys
OLD_FILES+=usr/bin/opieinfo
OLD_FILES+=usr/bin/opiekey
OLD_FILES+=usr/bin/opiepasswd
OLD_FILES+=usr/bin/otp-md4
OLD_FILES+=usr/bin/otp-md5
OLD_FILES+=usr/bin/otp-sha1
OLD_FILES+=usr/include/opie.h
OLD_FILES+=usr/lib/libopie.a
OLD_FILES+=usr/lib/libopie.so
OLD_LIBS+=usr/lib/libopie.so.8
OLD_FILES+=usr/lib/libopie_p.a
OLD_FILES+=usr/lib/pam_opie.so
OLD_LIBS+=usr/lib/pam_opie.so.6
OLD_FILES+=usr/lib/pam_opieaccess.so
OLD_LIBS+=usr/lib/pam_opieaccess.so.6
OLD_FILES+=usr/share/man/man1/opieinfo.1.gz
OLD_FILES+=usr/share/man/man1/opiekey.1.gz
OLD_FILES+=usr/share/man/man1/opiepasswd.1.gz
OLD_FILES+=usr/share/man/man1/otp-md4.1.gz
OLD_FILES+=usr/share/man/man1/otp-md5.1.gz
OLD_FILES+=usr/share/man/man1/otp-sha1.1.gz
OLD_FILES+=usr/share/man/man4/opie.4.gz
OLD_FILES+=usr/share/man/man4/skey.4.gz
OLD_FILES+=usr/share/man/man5/opieaccess.5.gz
OLD_FILES+=usr/share/man/man5/opiekeys.5.gz
OLD_FILES+=usr/share/man/man8/pam_opie.8.gz
OLD_FILES+=usr/share/man/man8/pam_opieaccess.8.gz
# 20220928: telnetd(8) removed
OLD_FILES+=etc/pam.d/telnetd
OLD_FILES+=usr/libexec/telnetd
OLD_FILES+=usr/share/man/man8/telnetd.8.gz
# 20220914: domain(9) updated
OLD_FILES+=usr/share/man/man9/domain_init.9.gz
OLD_FILES+=usr/share/man/man9/pfctlinput.9.gz
OLD_FILES+=usr/share/man/man9/pffinddomain.9.gz
OLD_FILES+=usr/share/man/man9/pffindproto.9.gz
OLD_FILES+=usr/share/man/man9/pffindtype.9.gz
# 20220825: awk tests moved to subdirs
OLD_FILES+=usr/tests/usr.bin/awk/awk_test
OLD_FILES+=usr/tests/usr.bin/awk/d_assign_NF.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_assign_NF.in
OLD_FILES+=usr/tests/usr.bin/awk/d_assign_NF.out
OLD_FILES+=usr/tests/usr.bin/awk/d_big_regexp.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_big_regexp.in
OLD_FILES+=usr/tests/usr.bin/awk/d_big_regexp.out
OLD_FILES+=usr/tests/usr.bin/awk/d_end1.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_end1.in
OLD_FILES+=usr/tests/usr.bin/awk/d_end1.out
OLD_FILES+=usr/tests/usr.bin/awk/d_end2.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_end2.in
OLD_FILES+=usr/tests/usr.bin/awk/d_end2.out
OLD_FILES+=usr/tests/usr.bin/awk/d_period.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_period.in
OLD_FILES+=usr/tests/usr.bin/awk/d_period.out
OLD_FILES+=usr/tests/usr.bin/awk/d_string1.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_string1.out
OLD_FILES+=usr/tests/usr.bin/awk/d_tolower.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_tolower.in
OLD_FILES+=usr/tests/usr.bin/awk/d_tolower.out
OLD_FILES+=usr/tests/usr.bin/awk/d_toupper.awk
OLD_FILES+=usr/tests/usr.bin/awk/d_toupper.in
OLD_FILES+=usr/tests/usr.bin/awk/d_toupper.out
# 20220820: remove knlist_init_rw_reader()
OLD_FILES+=usr/share/man/man9/knlist_init_rw_reader.9.gz
# 20220813: minigzip(1) removed in favor of gzip(1)
OLD_FILES+=usr/bin/minigzip
OLD_FILES+=usr/share/man/man1/minigzip.1.gz
# 20220811: MP_WATCHDOG removed
OLD_FILES+=usr/include/machine/mp_watchdog.h
# 20220811: new iconv encoder trait added
OLD_LIBS+=usr/lib/i18n/libBIG5.so.4
OLD_LIBS+=usr/lib/i18n/libDECHanyu.so.4
OLD_LIBS+=usr/lib/i18n/libEUC.so.4
OLD_LIBS+=usr/lib/i18n/libEUCTW.so.4
OLD_LIBS+=usr/lib/i18n/libGBK2K.so.4
OLD_LIBS+=usr/lib/i18n/libHZ.so.4
OLD_LIBS+=usr/lib/i18n/libISO2022.so.4
OLD_LIBS+=usr/lib/i18n/libJOHAB.so.4
OLD_LIBS+=usr/lib/i18n/libMSKanji.so.4
OLD_LIBS+=usr/lib/i18n/libUES.so.4
OLD_LIBS+=usr/lib/i18n/libUTF1632.so.4
OLD_LIBS+=usr/lib/i18n/libUTF7.so.4
OLD_LIBS+=usr/lib/i18n/libUTF8.so.4
OLD_LIBS+=usr/lib/i18n/libVIQR.so.4
OLD_LIBS+=usr/lib/i18n/libZW.so.4
OLD_LIBS+=usr/lib/i18n/libiconv_none.so.4
OLD_LIBS+=usr/lib/i18n/libiconv_std.so.4
OLD_LIBS+=usr/lib/i18n/libmapper_646.so.4
OLD_LIBS+=usr/lib/i18n/libmapper_none.so.4
OLD_LIBS+=usr/lib/i18n/libmapper_parallel.so.4
OLD_LIBS+=usr/lib/i18n/libmapper_serial.so.4
OLD_LIBS+=usr/lib/i18n/libmapper_std.so.4
OLD_LIBS+=usr/lib/i18n/libmapper_zone.so.4
# 20220811: raw socket layer removed
OLD_FILES+=usr/include/net/raw_cb.h
# 20220721: libnv version bumps
OLD_LIBS+=lib/libnv.so.0
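# When an ABI bump supersedes a shared object (libnv.so.0 above, or the
# iconv modules' .so.4 in the 20220811 group), the superseded library is
# recorded as OLD_LIBS rather than OLD_FILES, e.g. (libfoo and its version
# numbers are hypothetical):
#
#   OLD_LIBS+=lib/libfoo.so.5      # removed only by delete-old-libs
#   OLD_FILES+=usr/lib/libfoo.a    # removed by the regular delete-old pass
#
# so the old .so stays on disk until binaries linked against it are rebuilt.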
# 20220624: unix_passfd_test -> unix_passfd_stream/unix_passfd_dgram
OLD_FILES+=usr/tests/sys/kern/unix_passfd_test
# 20220621: ISA sound card drivers removed
OLD_FILES+=usr/share/man/man4/snd_ad1816.4.gz
OLD_FILES+=usr/share/man/man4/snd_ess.4.gz
OLD_FILES+=usr/share/man/man4/snd_gusc.4.gz
OLD_FILES+=usr/share/man/man4/snd_mss.4.gz
OLD_FILES+=usr/share/man/man4/snd_sb16.4.gz
OLD_FILES+=usr/share/man/man4/snd_sb8.4.gz
OLD_FILES+=usr/share/man/man4/snd_sbc.4.gz
# 20220612: new clang import which bumps version from 14.0.4 to 14.0.5
OLD_FILES+=usr/lib/clang/14.0.4/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/14.0.4/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/14.0.4/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/14.0.4/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/14.0.4/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/14.0.4/include/fuzzer
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/14.0.4/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/14.0.4/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/14.0.4/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/14.0.4/include/profile/InstrProfData.inc
OLD_FILES+=usr/lib/clang/14.0.4/include/profile/MemProfData.inc
OLD_DIRS+=usr/lib/clang/14.0.4/include/profile
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/memprof_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/14.0.4/include/sanitizer
OLD_FILES+=usr/lib/clang/14.0.4/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/14.0.4/include/xray
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_cuda_texture_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/14.0.4/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/14.0.4/include/adxintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/altivec.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ammintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/amxintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm64intr.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_acle.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_cde.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_mve.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_neon.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_neon_sve_bridge.h
OLD_FILES+=usr/lib/clang/14.0.4/include/arm_sve.h
OLD_FILES+=usr/lib/clang/14.0.4/include/armintr.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512fp16intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlfp16intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avxintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/builtins.h
OLD_FILES+=usr/lib/clang/14.0.4/include/cet.h
OLD_FILES+=usr/lib/clang/14.0.4/include/cetintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/cpuid.h
OLD_FILES+=usr/lib/clang/14.0.4/include/crc32intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/emmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/float.h
OLD_FILES+=usr/lib/clang/14.0.4/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/hexagon_circ_brev_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.4/include/hexagon_protos.h
OLD_FILES+=usr/lib/clang/14.0.4/include/hexagon_types.h
OLD_FILES+=usr/lib/clang/14.0.4/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/htmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/hvx_hexagon_protos.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/immintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/inttypes.h
OLD_FILES+=usr/lib/clang/14.0.4/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/iso646.h
OLD_FILES+=usr/lib/clang/14.0.4/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/limits.h
OLD_FILES+=usr/lib/clang/14.0.4/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/14.0.4/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/14.0.4/include/mmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/module.modulemap
OLD_FILES+=usr/lib/clang/14.0.4/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/msa.h
OLD_FILES+=usr/lib/clang/14.0.4/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/omp-tools.h
OLD_FILES+=usr/lib/clang/14.0.4/include/omp.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ompt.h
OLD_FILES+=usr/lib/clang/14.0.4/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/14.0.4/include/opencl-c.h
OLD_FILES+=usr/lib/clang/14.0.4/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/riscv_vector.h
OLD_FILES+=usr/lib/clang/14.0.4/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/s390intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/shaintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/smmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stdalign.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stdarg.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stdatomic.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stdbool.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stddef.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stdint.h
OLD_FILES+=usr/lib/clang/14.0.4/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/14.0.4/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/tgmath.h
OLD_FILES+=usr/lib/clang/14.0.4/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/unwind.h
OLD_FILES+=usr/lib/clang/14.0.4/include/vadefs.h
OLD_FILES+=usr/lib/clang/14.0.4/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/varargs.h
OLD_FILES+=usr/lib/clang/14.0.4/include/vecintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/14.0.4/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/x86intrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xopintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/14.0.4/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/14.0.4/include
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_static-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.asan_static-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.fuzzer_interceptors-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.4/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/14.0.4/lib/freebsd
OLD_DIRS+=usr/lib/clang/14.0.4/lib/share
OLD_DIRS+=usr/lib/clang/14.0.4/lib
OLD_FILES+=usr/lib/clang/14.0.4/share/asan_ignorelist.txt
OLD_FILES+=usr/lib/clang/14.0.4/share/cfi_ignorelist.txt
OLD_FILES+=usr/lib/clang/14.0.4/share/msan_ignorelist.txt
OLD_DIRS+=usr/lib/clang/14.0.4/share
OLD_DIRS+=usr/lib/clang/14.0.4
# 20220605: new clang import which bumps version from 14.0.3 to 14.0.4
OLD_FILES+=usr/lib/clang/14.0.3/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/14.0.3/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/14.0.3/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/14.0.3/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/14.0.3/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/14.0.3/include/fuzzer
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/14.0.3/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/14.0.3/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/14.0.3/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/14.0.3/include/profile/InstrProfData.inc
OLD_FILES+=usr/lib/clang/14.0.3/include/profile/MemProfData.inc
OLD_DIRS+=usr/lib/clang/14.0.3/include/profile
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/memprof_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/14.0.3/include/sanitizer
OLD_FILES+=usr/lib/clang/14.0.3/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/14.0.3/include/xray
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_cuda_texture_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/14.0.3/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/14.0.3/include/adxintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/altivec.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ammintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/amxintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm64intr.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_acle.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_cde.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_mve.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_neon.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_neon_sve_bridge.h
OLD_FILES+=usr/lib/clang/14.0.3/include/arm_sve.h
OLD_FILES+=usr/lib/clang/14.0.3/include/armintr.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512fp16intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlfp16intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avxintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/builtins.h
OLD_FILES+=usr/lib/clang/14.0.3/include/cet.h
OLD_FILES+=usr/lib/clang/14.0.3/include/cetintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/cpuid.h
OLD_FILES+=usr/lib/clang/14.0.3/include/crc32intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/emmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/float.h
OLD_FILES+=usr/lib/clang/14.0.3/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/hexagon_circ_brev_intrinsics.h
OLD_FILES+=usr/lib/clang/14.0.3/include/hexagon_protos.h
OLD_FILES+=usr/lib/clang/14.0.3/include/hexagon_types.h
OLD_FILES+=usr/lib/clang/14.0.3/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/htmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/hvx_hexagon_protos.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/immintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/inttypes.h
OLD_FILES+=usr/lib/clang/14.0.3/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/iso646.h
OLD_FILES+=usr/lib/clang/14.0.3/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/limits.h
OLD_FILES+=usr/lib/clang/14.0.3/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/14.0.3/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/14.0.3/include/mmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/module.modulemap
OLD_FILES+=usr/lib/clang/14.0.3/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/msa.h
OLD_FILES+=usr/lib/clang/14.0.3/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/omp-tools.h
OLD_FILES+=usr/lib/clang/14.0.3/include/omp.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ompt.h
OLD_FILES+=usr/lib/clang/14.0.3/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/14.0.3/include/opencl-c.h
OLD_FILES+=usr/lib/clang/14.0.3/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/riscv_vector.h
OLD_FILES+=usr/lib/clang/14.0.3/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/s390intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/shaintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/smmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stdalign.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stdarg.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stdatomic.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stdbool.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stddef.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stdint.h
OLD_FILES+=usr/lib/clang/14.0.3/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/14.0.3/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/tgmath.h
OLD_FILES+=usr/lib/clang/14.0.3/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/unwind.h
OLD_FILES+=usr/lib/clang/14.0.3/include/vadefs.h
OLD_FILES+=usr/lib/clang/14.0.3/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/varargs.h
OLD_FILES+=usr/lib/clang/14.0.3/include/vecintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/14.0.3/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/x86intrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xopintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/14.0.3/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/14.0.3/include
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_static-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.asan_static-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.fuzzer_interceptors-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/14.0.3/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/14.0.3/lib/freebsd
OLD_DIRS+=usr/lib/clang/14.0.3/lib/share
OLD_DIRS+=usr/lib/clang/14.0.3/lib
OLD_FILES+=usr/lib/clang/14.0.3/share/asan_ignorelist.txt
OLD_FILES+=usr/lib/clang/14.0.3/share/cfi_ignorelist.txt
OLD_FILES+=usr/lib/clang/14.0.3/share/msan_ignorelist.txt
OLD_DIRS+=usr/lib/clang/14.0.3/share
OLD_DIRS+=usr/lib/clang/14.0.3
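# Note: OLD_FILES and OLD_DIRS accumulate paths, relative to DESTDIR, that
# "make check-old" reports and "make delete-old" removes. Directories are
# listed after their contents and deepest-first, so each one is already
# empty by the time it is rmdir'ed.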
# 20220524: libkqueue test updates
OLD_FILES+=usr/tests/sys/kqueue/libkqueue/kqtest
# 20220514: new clang import which bumps version from 13.0.0 to 14.0.3
OLD_FILES+=usr/lib/clang/13.0.0/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/13.0.0/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/13.0.0/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/13.0.0/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/13.0.0/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/13.0.0/include/fuzzer
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/13.0.0/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/13.0.0/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/13.0.0/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/13.0.0/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/13.0.0/include/profile
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/13.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/13.0.0/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/13.0.0/include/xray
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/13.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/13.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/amxintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_cde.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_mve.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/13.0.0/include/arm_sve.h
OLD_FILES+=usr/lib/clang/13.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/builtins.h
OLD_FILES+=usr/lib/clang/13.0.0/include/cet.h
OLD_FILES+=usr/lib/clang/13.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/13.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/float.h
OLD_FILES+=usr/lib/clang/13.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/hexagon_circ_brev_intrinsics.h
OLD_FILES+=usr/lib/clang/13.0.0/include/hexagon_protos.h
OLD_FILES+=usr/lib/clang/13.0.0/include/hexagon_types.h
OLD_FILES+=usr/lib/clang/13.0.0/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/hvx_hexagon_protos.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/inttypes.h
OLD_FILES+=usr/lib/clang/13.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/iso646.h
OLD_FILES+=usr/lib/clang/13.0.0/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/limits.h
OLD_FILES+=usr/lib/clang/13.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/13.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/13.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/13.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/13.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/omp-tools.h
OLD_FILES+=usr/lib/clang/13.0.0/include/omp.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ompt.h
OLD_FILES+=usr/lib/clang/13.0.0/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/13.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/13.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/riscv_vector.h
OLD_FILES+=usr/lib/clang/13.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stdalign.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stdarg.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stdatomic.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stdbool.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stddef.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stdint.h
OLD_FILES+=usr/lib/clang/13.0.0/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/13.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/tgmath.h
OLD_FILES+=usr/lib/clang/13.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/unwind.h
OLD_FILES+=usr/lib/clang/13.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/13.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/varargs.h
OLD_FILES+=usr/lib/clang/13.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/13.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/13.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/13.0.0/include
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/13.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/13.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/13.0.0/lib
OLD_DIRS+=usr/lib/clang/13.0.0
# 20220514: new libc++ import which bumps version from 13.0.0 to 14.0.3
OLD_FILES+=usr/include/c++/v1/__function_like.h
OLD_FILES+=usr/include/c++/v1/__memory/pointer_safety.h
OLD_FILES+=usr/include/c++/v1/__utility/__decay_copy.h
# 20220418: uudecode merged into uuencode and renamed to bintrans
OLD_FILES+=usr/lib/debug/usr/bin/uuencode.debug
OLD_FILES+=usr/lib/debug/usr/bin/uudecode.debug
OLD_FILES+=usr/tests/usr.bin/uuencode/Kyuafile
OLD_FILES+=usr/tests/usr.bin/uuencode/legacy_test
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.153276.in
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.153276.out
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.base64.in
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.base64.out
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.in
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.out
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.sh
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.traditional.in
OLD_FILES+=usr/tests/usr.bin/uuencode/regress.traditional.out
OLD_FILES+=usr/tests/usr.bin/uudecode/Kyuafile
OLD_FILES+=usr/tests/usr.bin/uudecode/legacy_test
OLD_FILES+=usr/tests/usr.bin/uudecode/regress.153276.in
OLD_FILES+=usr/tests/usr.bin/uudecode/regress.153276.out
OLD_FILES+=usr/tests/usr.bin/uudecode/regress.base64.in
OLD_FILES+=usr/tests/usr.bin/uudecode/regress.out
OLD_FILES+=usr/tests/usr.bin/uudecode/regress.sh
OLD_FILES+=usr/tests/usr.bin/uudecode/regress.traditional.in
OLD_DIRS+=usr/tests/usr.bin/uuencode
OLD_DIRS+=usr/tests/usr.bin/uudecode
# 20220318: snd_ds1 and snd_maestro drivers removed
OLD_FILES+=usr/share/man/man4/snd_ds1.4.gz
OLD_FILES+=usr/share/man/man4/snd_maestro.4.gz
# 20220307: remove 330.news
OLD_FILES+=etc/periodic/daily/330.news
# 20220210: unwind.h moved to /usr/include
OLD_FILES+=usr/include/c++/v1/unwind-arm.h
OLD_FILES+=usr/include/c++/v1/unwind-itanium.h
OLD_FILES+=usr/include/c++/v1/unwind.h
# 20220128: mips pmc events removed
OLD_FILES+=usr/share/man/man3/pmc.mips24k.3.gz
OLD_FILES+=usr/share/man/man3/pmc.octeon.3.gz
# 20220127: terasic_mtl.4 removed
OLD_FILES+=usr/share/man/man4/terasic_mtl.4.gz
# 20211229: libc++ moved to /lib
MOVED_LIBS+=usr/lib/libc++.so.1
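# Note: MOVED_LIBS (unlike OLD_LIBS) marks a library that still exists at a
# new location -- here /lib -- so "make delete-old-libs" prunes only the
# stale copy at the old path; the library itself is not obsolete.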
# 20211221: efi_set_variables_supported.3 should be efi_variables_supported.3
OLD_FILES+=usr/share/man/man3/efi_set_variables_supported.3.gz
# 20211218: meteor.4 removed, see also 20200301 entry below
OLD_FILES+=usr/share/man/man4/meteor.4.gz
# 20211125: Old SCSI drivers removed
OLD_FILES+=usr/include/dev/ic/esp.h
OLD_FILES+=usr/share/man/man4/amr.4.gz
OLD_FILES+=usr/share/man/man4/esp.4.gz
OLD_FILES+=usr/share/man/man4/iir.4.gz
OLD_FILES+=usr/share/man/man4/mly.4.gz
OLD_FILES+=usr/share/man/man4/twa.4.gz
# 20211120: kernel_vmount function removed
OLD_FILES+=usr/share/man/man9/kernel_vmount.9.gz
# 20211115: vm_page busy functions removed
OLD_FILES+=usr/share/man/man9/vm_page_sbusy.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_xbusy.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_sleep_if_busy.9.gz
# 20211113: new clang import which bumps version from 12.0.1 to 13.0.0
OLD_FILES+=usr/lib/clang/12.0.1/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/12.0.1/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/12.0.1/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/12.0.1/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/12.0.1/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/12.0.1/include/fuzzer
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/12.0.1/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/12.0.1/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/12.0.1/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/12.0.1/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/12.0.1/include/profile
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/12.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/12.0.1/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/12.0.1/include/xray
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/12.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/12.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/amxintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_cde.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_mve.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/12.0.1/include/arm_sve.h
OLD_FILES+=usr/lib/clang/12.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/cet.h
OLD_FILES+=usr/lib/clang/12.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/12.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/float.h
OLD_FILES+=usr/lib/clang/12.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/inttypes.h
OLD_FILES+=usr/lib/clang/12.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/iso646.h
OLD_FILES+=usr/lib/clang/12.0.1/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/limits.h
OLD_FILES+=usr/lib/clang/12.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/12.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/12.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/12.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/12.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/omp-tools.h
OLD_FILES+=usr/lib/clang/12.0.1/include/omp.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ompt.h
OLD_FILES+=usr/lib/clang/12.0.1/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/12.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/12.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stdalign.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stdarg.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stdatomic.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stdbool.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stddef.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stdint.h
OLD_FILES+=usr/lib/clang/12.0.1/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/12.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/tgmath.h
OLD_FILES+=usr/lib/clang/12.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/unwind.h
OLD_FILES+=usr/lib/clang/12.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/12.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/varargs.h
OLD_FILES+=usr/lib/clang/12.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/12.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/12.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/12.0.1/include
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.1/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/12.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/12.0.1/lib
OLD_DIRS+=usr/lib/clang/12.0.1
# 20211113: new libc++ import which bumps version from 12.0.1 to 13.0.0
OLD_FILES+=usr/include/c++/v1/__functional_03
OLD_FILES+=usr/include/c++/v1/__functional_base_03
OLD_FILES+=usr/include/c++/v1/__memory/base.h
OLD_FILES+=usr/include/c++/v1/__memory/utilities.h
OLD_FILES+=usr/include/c++/v1/__sso_allocator
OLD_FILES+=usr/include/c++/v1/tr1/__availability
OLD_FILES+=usr/include/c++/v1/tr1/__bit_reference
OLD_FILES+=usr/include/c++/v1/tr1/__bits
OLD_FILES+=usr/include/c++/v1/tr1/__bsd_locale_defaults.h
OLD_FILES+=usr/include/c++/v1/tr1/__bsd_locale_fallbacks.h
OLD_FILES+=usr/include/c++/v1/tr1/__config
OLD_FILES+=usr/include/c++/v1/tr1/__debug
OLD_FILES+=usr/include/c++/v1/tr1/__errc
OLD_FILES+=usr/include/c++/v1/tr1/__functional_03
OLD_FILES+=usr/include/c++/v1/tr1/__functional_base
OLD_FILES+=usr/include/c++/v1/tr1/__functional_base_03
OLD_FILES+=usr/include/c++/v1/tr1/__hash_table
OLD_FILES+=usr/include/c++/v1/tr1/__libcpp_version
OLD_FILES+=usr/include/c++/v1/tr1/__locale
OLD_FILES+=usr/include/c++/v1/tr1/__mutex_base
OLD_FILES+=usr/include/c++/v1/tr1/__node_handle
OLD_FILES+=usr/include/c++/v1/tr1/__nullptr
OLD_FILES+=usr/include/c++/v1/tr1/__split_buffer
OLD_FILES+=usr/include/c++/v1/tr1/__sso_allocator
OLD_FILES+=usr/include/c++/v1/tr1/__std_stream
OLD_FILES+=usr/include/c++/v1/tr1/__string
OLD_FILES+=usr/include/c++/v1/tr1/__threading_support
OLD_FILES+=usr/include/c++/v1/tr1/__tree
OLD_FILES+=usr/include/c++/v1/tr1/__tuple
OLD_FILES+=usr/include/c++/v1/tr1/__undef_macros
OLD_FILES+=usr/include/c++/v1/tr1/algorithm
OLD_FILES+=usr/include/c++/v1/tr1/any
OLD_FILES+=usr/include/c++/v1/tr1/array
OLD_FILES+=usr/include/c++/v1/tr1/atomic
OLD_FILES+=usr/include/c++/v1/tr1/barrier
OLD_FILES+=usr/include/c++/v1/tr1/bit
OLD_FILES+=usr/include/c++/v1/tr1/bitset
OLD_FILES+=usr/include/c++/v1/tr1/cassert
OLD_FILES+=usr/include/c++/v1/tr1/ccomplex
OLD_FILES+=usr/include/c++/v1/tr1/cctype
OLD_FILES+=usr/include/c++/v1/tr1/cerrno
OLD_FILES+=usr/include/c++/v1/tr1/cfenv
OLD_FILES+=usr/include/c++/v1/tr1/cfloat
OLD_FILES+=usr/include/c++/v1/tr1/charconv
OLD_FILES+=usr/include/c++/v1/tr1/chrono
OLD_FILES+=usr/include/c++/v1/tr1/cinttypes
OLD_FILES+=usr/include/c++/v1/tr1/ciso646
OLD_FILES+=usr/include/c++/v1/tr1/climits
OLD_FILES+=usr/include/c++/v1/tr1/clocale
OLD_FILES+=usr/include/c++/v1/tr1/cmath
OLD_FILES+=usr/include/c++/v1/tr1/codecvt
OLD_FILES+=usr/include/c++/v1/tr1/compare
OLD_FILES+=usr/include/c++/v1/tr1/complex
OLD_FILES+=usr/include/c++/v1/tr1/complex.h
OLD_FILES+=usr/include/c++/v1/tr1/concepts
OLD_FILES+=usr/include/c++/v1/tr1/condition_variable
OLD_FILES+=usr/include/c++/v1/tr1/csetjmp
OLD_FILES+=usr/include/c++/v1/tr1/csignal
OLD_FILES+=usr/include/c++/v1/tr1/cstdarg
OLD_FILES+=usr/include/c++/v1/tr1/cstdbool
OLD_FILES+=usr/include/c++/v1/tr1/cstddef
OLD_FILES+=usr/include/c++/v1/tr1/cstdint
OLD_FILES+=usr/include/c++/v1/tr1/cstdio
OLD_FILES+=usr/include/c++/v1/tr1/cstdlib
OLD_FILES+=usr/include/c++/v1/tr1/cstring
OLD_FILES+=usr/include/c++/v1/tr1/ctgmath
OLD_FILES+=usr/include/c++/v1/tr1/ctime
OLD_FILES+=usr/include/c++/v1/tr1/ctype.h
OLD_FILES+=usr/include/c++/v1/tr1/cwchar
OLD_FILES+=usr/include/c++/v1/tr1/cwctype
OLD_FILES+=usr/include/c++/v1/tr1/deque
OLD_FILES+=usr/include/c++/v1/tr1/errno.h
OLD_FILES+=usr/include/c++/v1/tr1/exception
OLD_FILES+=usr/include/c++/v1/tr1/execution
OLD_FILES+=usr/include/c++/v1/tr1/fenv.h
OLD_FILES+=usr/include/c++/v1/tr1/filesystem
OLD_FILES+=usr/include/c++/v1/tr1/float.h
OLD_FILES+=usr/include/c++/v1/tr1/forward_list
OLD_FILES+=usr/include/c++/v1/tr1/fstream
OLD_FILES+=usr/include/c++/v1/tr1/functional
OLD_FILES+=usr/include/c++/v1/tr1/future
OLD_FILES+=usr/include/c++/v1/tr1/initializer_list
OLD_FILES+=usr/include/c++/v1/tr1/inttypes.h
OLD_FILES+=usr/include/c++/v1/tr1/iomanip
OLD_FILES+=usr/include/c++/v1/tr1/ios
OLD_FILES+=usr/include/c++/v1/tr1/iosfwd
OLD_FILES+=usr/include/c++/v1/tr1/iostream
OLD_FILES+=usr/include/c++/v1/tr1/istream
OLD_FILES+=usr/include/c++/v1/tr1/iterator
OLD_FILES+=usr/include/c++/v1/tr1/latch
OLD_FILES+=usr/include/c++/v1/tr1/limits
OLD_FILES+=usr/include/c++/v1/tr1/limits.h
OLD_FILES+=usr/include/c++/v1/tr1/list
OLD_FILES+=usr/include/c++/v1/tr1/locale
OLD_FILES+=usr/include/c++/v1/tr1/locale.h
OLD_FILES+=usr/include/c++/v1/tr1/map
OLD_FILES+=usr/include/c++/v1/tr1/math.h
OLD_FILES+=usr/include/c++/v1/tr1/memory
OLD_FILES+=usr/include/c++/v1/tr1/mutex
OLD_FILES+=usr/include/c++/v1/tr1/new
OLD_FILES+=usr/include/c++/v1/tr1/numbers
OLD_FILES+=usr/include/c++/v1/tr1/numeric
OLD_FILES+=usr/include/c++/v1/tr1/optional
OLD_FILES+=usr/include/c++/v1/tr1/ostream
OLD_FILES+=usr/include/c++/v1/tr1/queue
OLD_FILES+=usr/include/c++/v1/tr1/random
OLD_FILES+=usr/include/c++/v1/tr1/ratio
OLD_FILES+=usr/include/c++/v1/tr1/regex
OLD_FILES+=usr/include/c++/v1/tr1/scoped_allocator
OLD_FILES+=usr/include/c++/v1/tr1/semaphore
OLD_FILES+=usr/include/c++/v1/tr1/set
OLD_FILES+=usr/include/c++/v1/tr1/setjmp.h
OLD_FILES+=usr/include/c++/v1/tr1/shared_mutex
OLD_FILES+=usr/include/c++/v1/tr1/span
OLD_FILES+=usr/include/c++/v1/tr1/sstream
OLD_FILES+=usr/include/c++/v1/tr1/stack
OLD_FILES+=usr/include/c++/v1/tr1/stdbool.h
OLD_FILES+=usr/include/c++/v1/tr1/stddef.h
OLD_FILES+=usr/include/c++/v1/tr1/stdexcept
OLD_FILES+=usr/include/c++/v1/tr1/stdint.h
OLD_FILES+=usr/include/c++/v1/tr1/stdio.h
OLD_FILES+=usr/include/c++/v1/tr1/stdlib.h
OLD_FILES+=usr/include/c++/v1/tr1/streambuf
OLD_FILES+=usr/include/c++/v1/tr1/string
OLD_FILES+=usr/include/c++/v1/tr1/string.h
OLD_FILES+=usr/include/c++/v1/tr1/string_view
OLD_FILES+=usr/include/c++/v1/tr1/strstream
OLD_FILES+=usr/include/c++/v1/tr1/system_error
OLD_FILES+=usr/include/c++/v1/tr1/tgmath.h
OLD_FILES+=usr/include/c++/v1/tr1/thread
OLD_FILES+=usr/include/c++/v1/tr1/tuple
OLD_FILES+=usr/include/c++/v1/tr1/type_traits
OLD_FILES+=usr/include/c++/v1/tr1/typeindex
OLD_FILES+=usr/include/c++/v1/tr1/typeinfo
OLD_FILES+=usr/include/c++/v1/tr1/unordered_map
OLD_FILES+=usr/include/c++/v1/tr1/unordered_set
OLD_FILES+=usr/include/c++/v1/tr1/utility
OLD_FILES+=usr/include/c++/v1/tr1/valarray
OLD_FILES+=usr/include/c++/v1/tr1/variant
OLD_FILES+=usr/include/c++/v1/tr1/vector
OLD_FILES+=usr/include/c++/v1/tr1/version
OLD_FILES+=usr/include/c++/v1/tr1/wchar.h
OLD_FILES+=usr/include/c++/v1/tr1/wctype.h
OLD_DIRS+=usr/include/c++/v1/tr1
# 20211027: libdialog shlib bumped to version 10 for dialog 1.3
OLD_LIBS+=usr/lib/libdialog.so.9
OLD_LIBS+=usr/lib/libdpv.so.2
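# Note: OLD_LIBS entries are removed by the separate "make delete-old-libs"
# target rather than "make delete-old", so binaries still linked against the
# old shared library version keep working until the administrator chooses
# to prune it.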
# 20211026: retire obsolete iscsi initiator
OLD_FILES+=sbin/iscontrol
OLD_FILES+=usr/share/man/man4/iscsi_initiator.4.gz
OLD_FILES+=usr/share/man/man8/iscontrol.8.gz
# 20211022: retire sppp
OLD_FILES+=etc/rc.d/sppp
OLD_FILES+=rescue/spppcontrol
OLD_FILES+=sbin/spppcontrol
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/cserial.h
.endif
OLD_FILES+=usr/include/net/if_sppp.h
OLD_FILES+=usr/include/netgraph/ng_sppp.h
OLD_FILES+=usr/share/man/man4/ng_sppp.4.gz
OLD_FILES+=usr/share/man/man4/sppp.4.gz
OLD_FILES+=usr/share/man/man8/spppcontrol.8.gz
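# Note: entries may be wrapped in make conditionals, as in the i386 block
# above, so architecture-specific headers are only scheduled for removal on
# the TARGET_ARCH values that ever installed them.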
# 20211004: Removed sparc64 tests for lastcomm/sa
OLD_FILES+=usr/tests/usr.bin/lastcomm/v1-sparc64-acct.in
OLD_FILES+=usr/tests/usr.bin/lastcomm/v1-sparc64.out
OLD_FILES+=usr/tests/usr.bin/lastcomm/v2-sparc64-acct.in
OLD_FILES+=usr/tests/usr.bin/lastcomm/v2-sparc64.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-usr.in
# 20210929: remove hcseriald
OLD_FILES+=usr/sbin/hcseriald
OLD_FILES+=usr/share/man/man8/hcseriald.8.gz
# 20210929: Remove ng_h4
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_h4.h
OLD_FILES+=usr/share/man/man4/ng_h4.4.gz
# 20210923: rename boot(9) to kern_reboot(9)
OLD_FILES+=usr/share/man/man9/boot.9.gz
# 20210921: remove cloudabi
OLD_FILES+=usr/share/man/man4/cloudabi.4.gz
OLD_FILES+=usr/share/man/man4/cloudabi32.4.gz
OLD_FILES+=usr/share/man/man4/cloudabi64.4.gz
# 20210906: stop installing {llvm,clang,lldb}-tblgen
OLD_FILES+=usr/bin/llvm-tblgen
OLD_FILES+=usr/bin/clang-tblgen
OLD_FILES+=usr/bin/lldb-tblgen
OLD_FILES+=usr/share/man/man1/llvm-tblgen.1.gz
# 20210810: remove Pentium-related man pages and references
OLD_FILES+=usr/share/man/man3/pmc.p4.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p5.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p6.3.gz
# 20210805: C.UTF-8 installed to the wrong location
OLD_FILES+=usr/share/C.UTF-8.LC_CTYPE
# 20210619: new clang import which bumps version from 12.0.0 to 12.0.1
OLD_FILES+=usr/lib/clang/12.0.0/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/12.0.0/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/12.0.0/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/12.0.0/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/12.0.0/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/12.0.0/include/fuzzer
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/complex_cmath.h
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/12.0.0/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/12.0.0/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/12.0.0/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/12.0.0/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/12.0.0/include/profile
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/12.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/12.0.0/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/12.0.0/include/xray
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_hip_cmath.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/12.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/12.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/amxintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_cde.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_mve.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/12.0.0/include/arm_sve.h
OLD_FILES+=usr/lib/clang/12.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/avxvnniintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/cet.h
OLD_FILES+=usr/lib/clang/12.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/12.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/float.h
OLD_FILES+=usr/lib/clang/12.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/hresetintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/inttypes.h
OLD_FILES+=usr/lib/clang/12.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/iso646.h
OLD_FILES+=usr/lib/clang/12.0.0/include/keylockerintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/limits.h
OLD_FILES+=usr/lib/clang/12.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/12.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/12.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/12.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/12.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/omp-tools.h
OLD_FILES+=usr/lib/clang/12.0.0/include/omp.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ompt.h
OLD_FILES+=usr/lib/clang/12.0.0/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/12.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/12.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stdalign.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stdarg.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stdatomic.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stdbool.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stddef.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stdint.h
OLD_FILES+=usr/lib/clang/12.0.0/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/12.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/tgmath.h
OLD_FILES+=usr/lib/clang/12.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/uintrintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/unwind.h
OLD_FILES+=usr/lib/clang/12.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/12.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/varargs.h
OLD_FILES+=usr/lib/clang/12.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/12.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/x86gprintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/12.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/12.0.0/include
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/12.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/12.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/12.0.0/lib
OLD_DIRS+=usr/lib/clang/12.0.0
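# The OLD_FILES and OLD_DIRS entries accumulated above (and throughout this
# file) are consumed by the check-old/delete-old targets in Makefile.inc1.
# A typical cleanup pass after installworld looks like:
#   make check-old        # report stale files, directories and libraries
#   make delete-old       # remove stale files and directories
#   make delete-old-libs  # remove stale libraries once nothing needs them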
# 20210616: Remove crypto_cursor_seg{base,len}
OLD_FILES+=usr/share/man/man9/crypto_cursor_segbase.9.gz
OLD_FILES+=usr/share/man/man9/crypto_cursor_seglen.9.gz
# 20210618: rename of usr/share/certs/blacklisted to usr/share/certs/untrusted
OLD_FILES+=usr/share/certs/blacklisted/AddTrust_External_Root.pem
OLD_FILES+=usr/share/certs/blacklisted/AddTrust_Low-Value_Services_Root.pem
OLD_FILES+=usr/share/certs/blacklisted/Camerfirma_Chambers_of_Commerce_Root.pem
OLD_FILES+=usr/share/certs/blacklisted/Camerfirma_Global_Chambersign_Root.pem
OLD_FILES+=usr/share/certs/blacklisted/Certum_Root_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/Chambers_of_Commerce_Root_-_2008.pem
OLD_FILES+=usr/share/certs/blacklisted/D-TRUST_Root_CA_3_2013.pem
OLD_FILES+=usr/share/certs/blacklisted/EC-ACC.pem
OLD_FILES+=usr/share/certs/blacklisted/EE_Certification_Centre_Root_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/GeoTrust_Global_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/GeoTrust_Primary_Certification_Authority_-_G2.pem
OLD_FILES+=usr/share/certs/blacklisted/GeoTrust_Primary_Certification_Authority_-_G3.pem
OLD_FILES+=usr/share/certs/blacklisted/GeoTrust_Primary_Certification_Authority.pem
OLD_FILES+=usr/share/certs/blacklisted/GeoTrust_Universal_CA_2.pem
OLD_FILES+=usr/share/certs/blacklisted/GeoTrust_Universal_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/Global_Chambersign_Root_-_2008.pem
OLD_FILES+=usr/share/certs/blacklisted/LuxTrust_Global_Root_2.pem
OLD_FILES+=usr/share/certs/blacklisted/OISTE_WISeKey_Global_Root_GA_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/QuoVadis_Root_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/Sonera_Class_2_Root_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/Staat_der_Nederlanden_Root_CA_-_G2.pem
OLD_FILES+=usr/share/certs/blacklisted/Staat_der_Nederlanden_Root_CA_-_G3.pem
OLD_FILES+=usr/share/certs/blacklisted/SwissSign_Platinum_CA_-_G2.pem
OLD_FILES+=usr/share/certs/blacklisted/Symantec_Class_1_Public_Primary_Certification_Authority_-_G4.pem
OLD_FILES+=usr/share/certs/blacklisted/Symantec_Class_1_Public_Primary_Certification_Authority_-_G6.pem
OLD_FILES+=usr/share/certs/blacklisted/Symantec_Class_2_Public_Primary_Certification_Authority_-_G4.pem
OLD_FILES+=usr/share/certs/blacklisted/Symantec_Class_2_Public_Primary_Certification_Authority_-_G6.pem
OLD_FILES+=usr/share/certs/blacklisted/Taiwan_GRCA.pem
OLD_FILES+=usr/share/certs/blacklisted/thawte_Primary_Root_CA_-_G2.pem
OLD_FILES+=usr/share/certs/blacklisted/thawte_Primary_Root_CA_-_G3.pem
OLD_FILES+=usr/share/certs/blacklisted/thawte_Primary_Root_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/Trustis_FPS_Root_CA.pem
OLD_FILES+=usr/share/certs/blacklisted/Verisign_Class_1_Public_Primary_Certification_Authority_-_G3.pem
OLD_FILES+=usr/share/certs/blacklisted/Verisign_Class_2_Public_Primary_Certification_Authority_-_G3.pem
OLD_FILES+=usr/share/certs/blacklisted/Verisign_Class_3_Public_Primary_Certification_Authority_-_G3.pem
OLD_FILES+=usr/share/certs/blacklisted/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G4.pem
OLD_FILES+=usr/share/certs/blacklisted/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G5.pem
OLD_FILES+=usr/share/certs/blacklisted/VeriSign_Universal_Root_Certification_Authority.pem
OLD_DIRS+=usr/share/certs/blacklisted
# 20210613: new clang import which bumps the version from 11.0.1 to 12.0.0
OLD_FILES+=usr/lib/clang/11.0.1/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/11.0.1/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/11.0.1/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/11.0.1/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/11.0.1/include/fuzzer
OLD_FILES+=usr/lib/clang/11.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/11.0.1/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/11.0.1/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/11.0.1/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/11.0.1/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/11.0.1/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/11.0.1/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/11.0.1/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/11.0.1/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/11.0.1/include/profile
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/11.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/11.0.1/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/11.0.1/include/xray
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/11.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/11.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/amxintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_cde.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_mve.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/11.0.1/include/arm_sve.h
OLD_FILES+=usr/lib/clang/11.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/cet.h
OLD_FILES+=usr/lib/clang/11.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/11.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/float.h
OLD_FILES+=usr/lib/clang/11.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/inttypes.h
OLD_FILES+=usr/lib/clang/11.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/iso646.h
OLD_FILES+=usr/lib/clang/11.0.1/include/limits.h
OLD_FILES+=usr/lib/clang/11.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/11.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/11.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/11.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/11.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/omp-tools.h
OLD_FILES+=usr/lib/clang/11.0.1/include/omp.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ompt.h
OLD_FILES+=usr/lib/clang/11.0.1/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/11.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/11.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stdalign.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stdarg.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stdatomic.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stdbool.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stddef.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stdint.h
OLD_FILES+=usr/lib/clang/11.0.1/include/stdnoreturn.h
OLD_FILES+=usr/lib/clang/11.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/tgmath.h
OLD_FILES+=usr/lib/clang/11.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/unwind.h
OLD_FILES+=usr/lib/clang/11.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/11.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/varargs.h
OLD_FILES+=usr/lib/clang/11.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/11.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/11.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/11.0.1/include
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.1/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/11.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/11.0.1/lib
OLD_DIRS+=usr/lib/clang/11.0.1
# 20210613: Rename OpenZFS manual pages
OLD_FILES+=usr/share/man/man5/spl-module-parameters.5.gz
OLD_FILES+=usr/share/man/man5/zfs-events.5.gz
OLD_FILES+=usr/share/man/man5/zfs-module-parameters.5.gz
OLD_FILES+=usr/share/man/man8/zfsconcepts.8.gz
OLD_FILES+=usr/share/man/man8/zfsprops.8.gz
OLD_FILES+=usr/share/man/man5/zpool-features.5.gz
OLD_FILES+=usr/share/man/man8/zpoolconcepts.8.gz
OLD_FILES+=usr/share/man/man8/zpoolprops.8.gz
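# (These pages were reorganized upstream in OpenZFS 2.1: the module
# parameters are now documented in zfs(4) and spl(4), and zpool-features
# moved from section 5 to zpool-features(7).)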
# 20210611: Remove svn and svnlite
OLD_FILES+=usr/bin/svn
OLD_FILES+=usr/bin/svnadmin
OLD_FILES+=usr/bin/svnbench
OLD_FILES+=usr/bin/svndumpfilter
OLD_FILES+=usr/bin/svnfsfs
OLD_FILES+=usr/bin/svnlite
OLD_FILES+=usr/bin/svnliteadmin
OLD_FILES+=usr/bin/svnlitebench
OLD_FILES+=usr/bin/svnlitedumpfilter
OLD_FILES+=usr/bin/svnlitefsfs
OLD_FILES+=usr/bin/svnlitelook
OLD_FILES+=usr/bin/svnlitemucc
OLD_FILES+=usr/bin/svnliterdump
OLD_FILES+=usr/bin/svnliteserve
OLD_FILES+=usr/bin/svnlitesync
OLD_FILES+=usr/bin/svnliteversion
OLD_FILES+=usr/bin/svnlook
OLD_FILES+=usr/bin/svnmucc
OLD_FILES+=usr/bin/svnrdump
OLD_FILES+=usr/bin/svnserve
OLD_FILES+=usr/bin/svnsync
OLD_FILES+=usr/bin/svnversion
OLD_FILES+=usr/share/man/man1/svnlite.1.gz
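# (svnlite was the base system's stripped-down build of Apache Subversion;
# Subversion itself remains available from the devel/subversion port or
# package.)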
# 20210607: remove ancontrol(8)
OLD_FILES+=usr/sbin/ancontrol
OLD_FILES+=usr/share/man/man8/ancontrol.8.gz
# 20210607: remove an(4)
OLD_FILES+=usr/include/dev/an/if_aironet_ieee.h
OLD_FILES+=usr/include/dev/an/if_anreg.h
OLD_FILES+=usr/share/man/man4/an.4.gz
# 20210426: remove unused libexec/rc/rc.d/addswap
OLD_FILES+=etc/rc.d/addswap
# 20210413: Remove pfctlinput2
OLD_FILES+=usr/share/man/man9/pfctlinput2.9.gz
# 20210412: Remove kernel asymmetric crypto
OLD_FILES+=usr/share/man/man9/crypto_asym.9.gz
OLD_FILES+=usr/share/man/man9/crypto_kdispatch.9.gz
OLD_FILES+=usr/share/man/man9/crypto_kdone.9.gz
OLD_FILES+=usr/share/man/man9/crypto_kregister.9.gz
OLD_FILES+=usr/share/man/man9/CRYPTODEV_KPROCESS.9.gz
# 20210410: remove unused libexec/rc/rc.d/archdep
OLD_FILES+=etc/rc.d/archdep
# 20210408: remove tcp_hostcache.h
OLD_FILES+=usr/include/netinet/tcp_hostcache.h
# 20210403: remove kgmon(8)
OLD_FILES+=usr/sbin/kgmon
OLD_FILES+=usr/share/man/man8/kgmon.8.gz
# 20210401: remove bt(4) man page
OLD_FILES+=usr/share/man/man4/bt.4.gz
# 20210322: retire mn(4) synchronous serial driver
OLD_FILES+=usr/share/man/man4/if_mn.4.gz
OLD_FILES+=usr/share/man/man4/mn.4.gz
# 20210318: remove the terminfo database
OLD_FILES+=usr/share/terminfo/1/1178
OLD_FILES+=usr/share/terminfo/1/1730-lm
OLD_DIRS+=usr/share/terminfo/1/
OLD_FILES+=usr/share/terminfo/2/2621
OLD_FILES+=usr/share/terminfo/2/2621-wl
OLD_FILES+=usr/share/terminfo/2/2621A
OLD_FILES+=usr/share/terminfo/2/2621a
OLD_DIRS+=usr/share/terminfo/2/
OLD_FILES+=usr/share/terminfo/3/386at
OLD_FILES+=usr/share/terminfo/3/3b1
OLD_DIRS+=usr/share/terminfo/3/
OLD_FILES+=usr/share/terminfo/4/4025ex
OLD_FILES+=usr/share/terminfo/4/4027ex
OLD_FILES+=usr/share/terminfo/4/4410-w
OLD_DIRS+=usr/share/terminfo/4/
OLD_FILES+=usr/share/terminfo/5/5051
OLD_FILES+=usr/share/terminfo/5/5410-w
OLD_FILES+=usr/share/terminfo/5/5620
OLD_FILES+=usr/share/terminfo/5/5630-24
OLD_FILES+=usr/share/terminfo/5/5630DMD-24
OLD_DIRS+=usr/share/terminfo/5/
OLD_FILES+=usr/share/terminfo/6/6053
OLD_FILES+=usr/share/terminfo/6/6053-dg
OLD_FILES+=usr/share/terminfo/6/605x
OLD_FILES+=usr/share/terminfo/6/605x-dg
OLD_FILES+=usr/share/terminfo/6/630-lm
OLD_FILES+=usr/share/terminfo/6/630MTG-24
OLD_DIRS+=usr/share/terminfo/6/
OLD_FILES+=usr/share/terminfo/7/730MTG-24
OLD_FILES+=usr/share/terminfo/7/730MTG-41
OLD_FILES+=usr/share/terminfo/7/730MTG-41r
OLD_FILES+=usr/share/terminfo/7/730MTGr
OLD_FILES+=usr/share/terminfo/7/730MTGr-24
OLD_DIRS+=usr/share/terminfo/7/
OLD_FILES+=usr/share/terminfo/8/8510
OLD_DIRS+=usr/share/terminfo/8/
OLD_FILES+=usr/share/terminfo/9/955-hb
OLD_FILES+=usr/share/terminfo/9/955-w
OLD_FILES+=usr/share/terminfo/9/9term
OLD_DIRS+=usr/share/terminfo/9/
OLD_FILES+=usr/share/terminfo/A/Apple_Terminal
OLD_DIRS+=usr/share/terminfo/A/
OLD_FILES+=usr/share/terminfo/E/Eterm
OLD_FILES+=usr/share/terminfo/E/Eterm-256color
OLD_FILES+=usr/share/terminfo/E/Eterm-88color
OLD_FILES+=usr/share/terminfo/E/Eterm-color
OLD_DIRS+=usr/share/terminfo/E/
OLD_FILES+=usr/share/terminfo/L/LFT-PC850
OLD_DIRS+=usr/share/terminfo/L/
OLD_FILES+=usr/share/terminfo/M/MtxOrb
OLD_FILES+=usr/share/terminfo/M/MtxOrb162
OLD_FILES+=usr/share/terminfo/M/MtxOrb204
OLD_DIRS+=usr/share/terminfo/M/
OLD_FILES+=usr/share/terminfo/N/NCR260VT300WPP
OLD_FILES+=usr/share/terminfo/N/NCRVT100WPP
OLD_DIRS+=usr/share/terminfo/N/
OLD_FILES+=usr/share/terminfo/P/P12
OLD_FILES+=usr/share/terminfo/P/P12-M
OLD_FILES+=usr/share/terminfo/P/P12-M-W
OLD_FILES+=usr/share/terminfo/P/P12-W
OLD_FILES+=usr/share/terminfo/P/P14
OLD_FILES+=usr/share/terminfo/P/P14-M
OLD_FILES+=usr/share/terminfo/P/P14-M-W
OLD_FILES+=usr/share/terminfo/P/P14-W
OLD_FILES+=usr/share/terminfo/P/P4
OLD_FILES+=usr/share/terminfo/P/P5
OLD_FILES+=usr/share/terminfo/P/P7
OLD_FILES+=usr/share/terminfo/P/P8
OLD_FILES+=usr/share/terminfo/P/P8-W
OLD_FILES+=usr/share/terminfo/P/P9
OLD_FILES+=usr/share/terminfo/P/P9-8
OLD_FILES+=usr/share/terminfo/P/P9-8-W
OLD_FILES+=usr/share/terminfo/P/P9-W
OLD_DIRS+=usr/share/terminfo/P/
OLD_FILES+=usr/share/terminfo/Q/Q306-8-pc
OLD_FILES+=usr/share/terminfo/Q/Q310-vip-H
OLD_FILES+=usr/share/terminfo/Q/Q310-vip-H-am
OLD_FILES+=usr/share/terminfo/Q/Q310-vip-Hw
OLD_FILES+=usr/share/terminfo/Q/Q310-vip-w
OLD_FILES+=usr/share/terminfo/Q/Q310-vip-w-am
OLD_DIRS+=usr/share/terminfo/Q/
OLD_FILES+=usr/share/terminfo/X/X-hpterm
OLD_DIRS+=usr/share/terminfo/X/
OLD_FILES+=usr/share/terminfo/a/a210
OLD_FILES+=usr/share/terminfo/a/a80
OLD_FILES+=usr/share/terminfo/a/a980
OLD_FILES+=usr/share/terminfo/a/aa4080
OLD_FILES+=usr/share/terminfo/a/aaa
OLD_FILES+=usr/share/terminfo/a/aaa+dec
OLD_FILES+=usr/share/terminfo/a/aaa+rv
OLD_FILES+=usr/share/terminfo/a/aaa+unk
OLD_FILES+=usr/share/terminfo/a/aaa-18
OLD_FILES+=usr/share/terminfo/a/aaa-18-rv
OLD_FILES+=usr/share/terminfo/a/aaa-20
OLD_FILES+=usr/share/terminfo/a/aaa-22
OLD_FILES+=usr/share/terminfo/a/aaa-24
OLD_FILES+=usr/share/terminfo/a/aaa-24-rv
OLD_FILES+=usr/share/terminfo/a/aaa-26
OLD_FILES+=usr/share/terminfo/a/aaa-28
OLD_FILES+=usr/share/terminfo/a/aaa-30
OLD_FILES+=usr/share/terminfo/a/aaa-30-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-30-rv
OLD_FILES+=usr/share/terminfo/a/aaa-30-rv-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-30-s
OLD_FILES+=usr/share/terminfo/a/aaa-30-s-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-30-s-rv
OLD_FILES+=usr/share/terminfo/a/aaa-30-s-rv-ct
OLD_FILES+=usr/share/terminfo/a/aaa-36
OLD_FILES+=usr/share/terminfo/a/aaa-36-rv
OLD_FILES+=usr/share/terminfo/a/aaa-40
OLD_FILES+=usr/share/terminfo/a/aaa-40-rv
OLD_FILES+=usr/share/terminfo/a/aaa-48
OLD_FILES+=usr/share/terminfo/a/aaa-48-rv
OLD_FILES+=usr/share/terminfo/a/aaa-60
OLD_FILES+=usr/share/terminfo/a/aaa-60-dec-rv
OLD_FILES+=usr/share/terminfo/a/aaa-60-rv
OLD_FILES+=usr/share/terminfo/a/aaa-60-s
OLD_FILES+=usr/share/terminfo/a/aaa-60-s-rv
OLD_FILES+=usr/share/terminfo/a/aaa-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-db
OLD_FILES+=usr/share/terminfo/a/aaa-rv
OLD_FILES+=usr/share/terminfo/a/aaa-rv-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-rv-unk
OLD_FILES+=usr/share/terminfo/a/aaa-s
OLD_FILES+=usr/share/terminfo/a/aaa-s-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-s-rv
OLD_FILES+=usr/share/terminfo/a/aaa-s-rv-ctxt
OLD_FILES+=usr/share/terminfo/a/aaa-unk
OLD_FILES+=usr/share/terminfo/a/aas1901
OLD_FILES+=usr/share/terminfo/a/abm80
OLD_FILES+=usr/share/terminfo/a/abm85
OLD_FILES+=usr/share/terminfo/a/abm85e
OLD_FILES+=usr/share/terminfo/a/abm85h
OLD_FILES+=usr/share/terminfo/a/abm85h-old
OLD_FILES+=usr/share/terminfo/a/absolute
OLD_FILES+=usr/share/terminfo/a/act4
OLD_FILES+=usr/share/terminfo/a/act5
OLD_FILES+=usr/share/terminfo/a/addrinfo
OLD_FILES+=usr/share/terminfo/a/adds200
OLD_FILES+=usr/share/terminfo/a/adds980
OLD_FILES+=usr/share/terminfo/a/addsviewpoint
OLD_FILES+=usr/share/terminfo/a/addsvp60
OLD_FILES+=usr/share/terminfo/a/adm+sgr
OLD_FILES+=usr/share/terminfo/a/adm1
OLD_FILES+=usr/share/terminfo/a/adm11
OLD_FILES+=usr/share/terminfo/a/adm1178
OLD_FILES+=usr/share/terminfo/a/adm12
OLD_FILES+=usr/share/terminfo/a/adm1a
OLD_FILES+=usr/share/terminfo/a/adm2
OLD_FILES+=usr/share/terminfo/a/adm20
OLD_FILES+=usr/share/terminfo/a/adm21
OLD_FILES+=usr/share/terminfo/a/adm22
OLD_FILES+=usr/share/terminfo/a/adm3
OLD_FILES+=usr/share/terminfo/a/adm31
OLD_FILES+=usr/share/terminfo/a/adm31-old
OLD_FILES+=usr/share/terminfo/a/adm36
OLD_FILES+=usr/share/terminfo/a/adm3a
OLD_FILES+=usr/share/terminfo/a/adm3a+
OLD_FILES+=usr/share/terminfo/a/adm42
OLD_FILES+=usr/share/terminfo/a/adm42-ns
OLD_FILES+=usr/share/terminfo/a/adm5
OLD_FILES+=usr/share/terminfo/a/aepro
OLD_FILES+=usr/share/terminfo/a/aixterm
OLD_FILES+=usr/share/terminfo/a/aixterm-16color
OLD_FILES+=usr/share/terminfo/a/aixterm-m
OLD_FILES+=usr/share/terminfo/a/aixterm-m-old
OLD_FILES+=usr/share/terminfo/a/aj
OLD_FILES+=usr/share/terminfo/a/aj510
OLD_FILES+=usr/share/terminfo/a/aj830
OLD_FILES+=usr/share/terminfo/a/aj832
OLD_FILES+=usr/share/terminfo/a/alacritty
OLD_FILES+=usr/share/terminfo/a/alacritty+common
OLD_FILES+=usr/share/terminfo/a/alacritty-direct
OLD_FILES+=usr/share/terminfo/a/alt2
OLD_FILES+=usr/share/terminfo/a/alt3
OLD_FILES+=usr/share/terminfo/a/alt4
OLD_FILES+=usr/share/terminfo/a/alt5
OLD_FILES+=usr/share/terminfo/a/alt7
OLD_FILES+=usr/share/terminfo/a/alt7pc
OLD_FILES+=usr/share/terminfo/a/alto-h19
OLD_FILES+=usr/share/terminfo/a/alto-heath
OLD_FILES+=usr/share/terminfo/a/altoh19
OLD_FILES+=usr/share/terminfo/a/altoheath
OLD_FILES+=usr/share/terminfo/a/altos-2
OLD_FILES+=usr/share/terminfo/a/altos-3
OLD_FILES+=usr/share/terminfo/a/altos-4
OLD_FILES+=usr/share/terminfo/a/altos-5
OLD_FILES+=usr/share/terminfo/a/altos2
OLD_FILES+=usr/share/terminfo/a/altos3
OLD_FILES+=usr/share/terminfo/a/altos4
OLD_FILES+=usr/share/terminfo/a/altos5
OLD_FILES+=usr/share/terminfo/a/altos7
OLD_FILES+=usr/share/terminfo/a/altos7pc
OLD_FILES+=usr/share/terminfo/a/ambas
OLD_FILES+=usr/share/terminfo/a/ambassador
OLD_FILES+=usr/share/terminfo/a/amiga
OLD_FILES+=usr/share/terminfo/a/amiga-8bit
OLD_FILES+=usr/share/terminfo/a/amiga-h
OLD_FILES+=usr/share/terminfo/a/amiga-vnc
OLD_FILES+=usr/share/terminfo/a/amp219
OLD_FILES+=usr/share/terminfo/a/amp219w
OLD_FILES+=usr/share/terminfo/a/ampex-219
OLD_FILES+=usr/share/terminfo/a/ampex-219w
OLD_FILES+=usr/share/terminfo/a/ampex-232
OLD_FILES+=usr/share/terminfo/a/ampex175
OLD_FILES+=usr/share/terminfo/a/ampex175-b
OLD_FILES+=usr/share/terminfo/a/ampex210
OLD_FILES+=usr/share/terminfo/a/ampex219
OLD_FILES+=usr/share/terminfo/a/ampex219w
OLD_FILES+=usr/share/terminfo/a/ampex232
OLD_FILES+=usr/share/terminfo/a/ampex232w
OLD_FILES+=usr/share/terminfo/a/ampex80
OLD_FILES+=usr/share/terminfo/a/annarbor4080
OLD_FILES+=usr/share/terminfo/a/ansi
OLD_FILES+=usr/share/terminfo/a/ansi+arrows
OLD_FILES+=usr/share/terminfo/a/ansi+csr
OLD_FILES+=usr/share/terminfo/a/ansi+cup
OLD_FILES+=usr/share/terminfo/a/ansi+enq
OLD_FILES+=usr/share/terminfo/a/ansi+erase
OLD_FILES+=usr/share/terminfo/a/ansi+idc
OLD_FILES+=usr/share/terminfo/a/ansi+idc1
OLD_FILES+=usr/share/terminfo/a/ansi+idl
OLD_FILES+=usr/share/terminfo/a/ansi+idl1
OLD_FILES+=usr/share/terminfo/a/ansi+inittabs
OLD_FILES+=usr/share/terminfo/a/ansi+local
OLD_FILES+=usr/share/terminfo/a/ansi+local1
OLD_FILES+=usr/share/terminfo/a/ansi+pp
OLD_FILES+=usr/share/terminfo/a/ansi+rca
OLD_FILES+=usr/share/terminfo/a/ansi+rep
OLD_FILES+=usr/share/terminfo/a/ansi+sgr
OLD_FILES+=usr/share/terminfo/a/ansi+sgrbold
OLD_FILES+=usr/share/terminfo/a/ansi+sgrdim
OLD_FILES+=usr/share/terminfo/a/ansi+sgrso
OLD_FILES+=usr/share/terminfo/a/ansi+sgrul
OLD_FILES+=usr/share/terminfo/a/ansi+tabs
OLD_FILES+=usr/share/terminfo/a/ansi-color-2-emx
OLD_FILES+=usr/share/terminfo/a/ansi-color-3-emx
OLD_FILES+=usr/share/terminfo/a/ansi-emx
OLD_FILES+=usr/share/terminfo/a/ansi-generic
OLD_FILES+=usr/share/terminfo/a/ansi-m
OLD_FILES+=usr/share/terminfo/a/ansi-mini
OLD_FILES+=usr/share/terminfo/a/ansi-mono
OLD_FILES+=usr/share/terminfo/a/ansi-mr
OLD_FILES+=usr/share/terminfo/a/ansi-mtabs
OLD_FILES+=usr/share/terminfo/a/ansi-nt
OLD_FILES+=usr/share/terminfo/a/ansi.sys
OLD_FILES+=usr/share/terminfo/a/ansi.sys-old
OLD_FILES+=usr/share/terminfo/a/ansi.sysk
OLD_FILES+=usr/share/terminfo/a/ansi43m
OLD_FILES+=usr/share/terminfo/a/ansi77
OLD_FILES+=usr/share/terminfo/a/ansi80x25
OLD_FILES+=usr/share/terminfo/a/ansi80x25-mono
OLD_FILES+=usr/share/terminfo/a/ansi80x25-raw
OLD_FILES+=usr/share/terminfo/a/ansi80x30
OLD_FILES+=usr/share/terminfo/a/ansi80x30-mono
OLD_FILES+=usr/share/terminfo/a/ansi80x43
OLD_FILES+=usr/share/terminfo/a/ansi80x43-mono
OLD_FILES+=usr/share/terminfo/a/ansi80x50
OLD_FILES+=usr/share/terminfo/a/ansi80x50-mono
OLD_FILES+=usr/share/terminfo/a/ansi80x60
OLD_FILES+=usr/share/terminfo/a/ansi80x60-mono
OLD_FILES+=usr/share/terminfo/a/ansil
OLD_FILES+=usr/share/terminfo/a/ansil-mono
OLD_FILES+=usr/share/terminfo/a/ansis
OLD_FILES+=usr/share/terminfo/a/ansis-mono
OLD_FILES+=usr/share/terminfo/a/ansisysk
OLD_FILES+=usr/share/terminfo/a/ansiterm
OLD_FILES+=usr/share/terminfo/a/ansiw
OLD_FILES+=usr/share/terminfo/a/ap-vm80
OLD_FILES+=usr/share/terminfo/a/apl
OLD_FILES+=usr/share/terminfo/a/apollo
OLD_FILES+=usr/share/terminfo/a/apollo_15P
OLD_FILES+=usr/share/terminfo/a/apollo_19L
OLD_FILES+=usr/share/terminfo/a/apollo_color
OLD_FILES+=usr/share/terminfo/a/apple-80
OLD_FILES+=usr/share/terminfo/a/apple-ae
OLD_FILES+=usr/share/terminfo/a/apple-soroc
OLD_FILES+=usr/share/terminfo/a/apple-uterm
OLD_FILES+=usr/share/terminfo/a/apple-uterm-vb
OLD_FILES+=usr/share/terminfo/a/apple-videx
OLD_FILES+=usr/share/terminfo/a/apple-videx2
OLD_FILES+=usr/share/terminfo/a/apple-videx3
OLD_FILES+=usr/share/terminfo/a/apple-vm80
OLD_FILES+=usr/share/terminfo/a/apple2e
OLD_FILES+=usr/share/terminfo/a/apple2e-p
OLD_FILES+=usr/share/terminfo/a/apple80p
OLD_FILES+=usr/share/terminfo/a/appleII
OLD_FILES+=usr/share/terminfo/a/appleIIc
OLD_FILES+=usr/share/terminfo/a/appleIIe
OLD_FILES+=usr/share/terminfo/a/appleIIgs
OLD_FILES+=usr/share/terminfo/a/arm100
OLD_FILES+=usr/share/terminfo/a/arm100-am
OLD_FILES+=usr/share/terminfo/a/arm100-w
OLD_FILES+=usr/share/terminfo/a/arm100-wam
OLD_FILES+=usr/share/terminfo/a/at
OLD_FILES+=usr/share/terminfo/a/at-color
OLD_FILES+=usr/share/terminfo/a/at-m
OLD_FILES+=usr/share/terminfo/a/at386
OLD_FILES+=usr/share/terminfo/a/atari
OLD_FILES+=usr/share/terminfo/a/atari-color
OLD_FILES+=usr/share/terminfo/a/atari-m
OLD_FILES+=usr/share/terminfo/a/atari-old
OLD_FILES+=usr/share/terminfo/a/atari_st
OLD_FILES+=usr/share/terminfo/a/atari_st-color
OLD_FILES+=usr/share/terminfo/a/atarist-m
OLD_FILES+=usr/share/terminfo/a/aterm
OLD_FILES+=usr/share/terminfo/a/att2300
OLD_FILES+=usr/share/terminfo/a/att2350
OLD_FILES+=usr/share/terminfo/a/att4410
OLD_FILES+=usr/share/terminfo/a/att4410-w
OLD_FILES+=usr/share/terminfo/a/att4410v1
OLD_FILES+=usr/share/terminfo/a/att4410v1-w
OLD_FILES+=usr/share/terminfo/a/att4415
OLD_FILES+=usr/share/terminfo/a/att4415+nl
OLD_FILES+=usr/share/terminfo/a/att4415-nl
OLD_FILES+=usr/share/terminfo/a/att4415-rv
OLD_FILES+=usr/share/terminfo/a/att4415-rv-nl
OLD_FILES+=usr/share/terminfo/a/att4415-w
OLD_FILES+=usr/share/terminfo/a/att4415-w-nl
OLD_FILES+=usr/share/terminfo/a/att4415-w-rv
OLD_FILES+=usr/share/terminfo/a/att4415-w-rv-n
OLD_FILES+=usr/share/terminfo/a/att4418
OLD_FILES+=usr/share/terminfo/a/att4418-w
OLD_FILES+=usr/share/terminfo/a/att4420
OLD_FILES+=usr/share/terminfo/a/att4424
OLD_FILES+=usr/share/terminfo/a/att4424-1
OLD_FILES+=usr/share/terminfo/a/att4424m
OLD_FILES+=usr/share/terminfo/a/att4425
OLD_FILES+=usr/share/terminfo/a/att4425-nl
OLD_FILES+=usr/share/terminfo/a/att4425-w
OLD_FILES+=usr/share/terminfo/a/att4426
OLD_FILES+=usr/share/terminfo/a/att500
OLD_FILES+=usr/share/terminfo/a/att505
OLD_FILES+=usr/share/terminfo/a/att505-24
OLD_FILES+=usr/share/terminfo/a/att510a
OLD_FILES+=usr/share/terminfo/a/att510d
OLD_FILES+=usr/share/terminfo/a/att513
OLD_FILES+=usr/share/terminfo/a/att5310
OLD_FILES+=usr/share/terminfo/a/att5320
OLD_FILES+=usr/share/terminfo/a/att5410
OLD_FILES+=usr/share/terminfo/a/att5410-w
OLD_FILES+=usr/share/terminfo/a/att5410v1
OLD_FILES+=usr/share/terminfo/a/att5410v1-w
OLD_FILES+=usr/share/terminfo/a/att5418
OLD_FILES+=usr/share/terminfo/a/att5418-w
OLD_FILES+=usr/share/terminfo/a/att5420
OLD_FILES+=usr/share/terminfo/a/att5420+nl
OLD_FILES+=usr/share/terminfo/a/att5420-nl
OLD_FILES+=usr/share/terminfo/a/att5420-rv
OLD_FILES+=usr/share/terminfo/a/att5420-rv-nl
OLD_FILES+=usr/share/terminfo/a/att5420-w
OLD_FILES+=usr/share/terminfo/a/att5420-w-nl
OLD_FILES+=usr/share/terminfo/a/att5420-w-rv
OLD_FILES+=usr/share/terminfo/a/att5420-w-rv-n
OLD_FILES+=usr/share/terminfo/a/att5420_2
OLD_FILES+=usr/share/terminfo/a/att5420_2-w
OLD_FILES+=usr/share/terminfo/a/att5425
OLD_FILES+=usr/share/terminfo/a/att5425-nl
OLD_FILES+=usr/share/terminfo/a/att5425-w
OLD_FILES+=usr/share/terminfo/a/att5430
OLD_FILES+=usr/share/terminfo/a/att5620
OLD_FILES+=usr/share/terminfo/a/att5620-1
OLD_FILES+=usr/share/terminfo/a/att5620-24
OLD_FILES+=usr/share/terminfo/a/att5620-34
OLD_FILES+=usr/share/terminfo/a/att5620-s
OLD_FILES+=usr/share/terminfo/a/att605
OLD_FILES+=usr/share/terminfo/a/att605-pc
OLD_FILES+=usr/share/terminfo/a/att605-w
OLD_FILES+=usr/share/terminfo/a/att610
OLD_FILES+=usr/share/terminfo/a/att610+cvis
OLD_FILES+=usr/share/terminfo/a/att610+cvis0
OLD_FILES+=usr/share/terminfo/a/att610-103k
OLD_FILES+=usr/share/terminfo/a/att610-103k-w
OLD_FILES+=usr/share/terminfo/a/att610-w
OLD_FILES+=usr/share/terminfo/a/att615
OLD_FILES+=usr/share/terminfo/a/att615-103k
OLD_FILES+=usr/share/terminfo/a/att615-103k-w
OLD_FILES+=usr/share/terminfo/a/att615-w
OLD_FILES+=usr/share/terminfo/a/att620
OLD_FILES+=usr/share/terminfo/a/att620-103k
OLD_FILES+=usr/share/terminfo/a/att620-103k-w
OLD_FILES+=usr/share/terminfo/a/att620-w
OLD_FILES+=usr/share/terminfo/a/att630
OLD_FILES+=usr/share/terminfo/a/att630-24
OLD_FILES+=usr/share/terminfo/a/att6386
OLD_FILES+=usr/share/terminfo/a/att700
OLD_FILES+=usr/share/terminfo/a/att730
OLD_FILES+=usr/share/terminfo/a/att730-24
OLD_FILES+=usr/share/terminfo/a/att730-41
OLD_FILES+=usr/share/terminfo/a/att7300
OLD_FILES+=usr/share/terminfo/a/att730r
OLD_FILES+=usr/share/terminfo/a/att730r-24
OLD_FILES+=usr/share/terminfo/a/att730r-41
OLD_FILES+=usr/share/terminfo/a/avatar
OLD_FILES+=usr/share/terminfo/a/avatar0
OLD_FILES+=usr/share/terminfo/a/avatar0+
OLD_FILES+=usr/share/terminfo/a/avatar1
OLD_FILES+=usr/share/terminfo/a/avt
OLD_FILES+=usr/share/terminfo/a/avt+s
OLD_FILES+=usr/share/terminfo/a/avt-ns
OLD_FILES+=usr/share/terminfo/a/avt-rv
OLD_FILES+=usr/share/terminfo/a/avt-rv-ns
OLD_FILES+=usr/share/terminfo/a/avt-rv-s
OLD_FILES+=usr/share/terminfo/a/avt-s
OLD_FILES+=usr/share/terminfo/a/avt-w
OLD_FILES+=usr/share/terminfo/a/avt-w-ns
OLD_FILES+=usr/share/terminfo/a/avt-w-rv
OLD_FILES+=usr/share/terminfo/a/avt-w-rv-ns
OLD_FILES+=usr/share/terminfo/a/avt-w-rv-s
OLD_FILES+=usr/share/terminfo/a/avt-w-s
OLD_FILES+=usr/share/terminfo/a/aws
OLD_FILES+=usr/share/terminfo/a/awsc
OLD_DIRS+=usr/share/terminfo/a/
OLD_FILES+=usr/share/terminfo/b/b-128
OLD_FILES+=usr/share/terminfo/b/bantam
OLD_FILES+=usr/share/terminfo/b/basic4
OLD_FILES+=usr/share/terminfo/b/basis
OLD_FILES+=usr/share/terminfo/b/bct510a
OLD_FILES+=usr/share/terminfo/b/bct510d
OLD_FILES+=usr/share/terminfo/b/beacon
OLD_FILES+=usr/share/terminfo/b/bee
OLD_FILES+=usr/share/terminfo/b/beehive
OLD_FILES+=usr/share/terminfo/b/beehive3
OLD_FILES+=usr/share/terminfo/b/beehive4
OLD_FILES+=usr/share/terminfo/b/beehiveIIIm
OLD_FILES+=usr/share/terminfo/b/beterm
OLD_FILES+=usr/share/terminfo/b/bg1.25
OLD_FILES+=usr/share/terminfo/b/bg1.25nv
OLD_FILES+=usr/share/terminfo/b/bg1.25rv
OLD_FILES+=usr/share/terminfo/b/bg2.0
OLD_FILES+=usr/share/terminfo/b/bg2.0nv
OLD_FILES+=usr/share/terminfo/b/bg2.0rv
OLD_FILES+=usr/share/terminfo/b/bg3.10
OLD_FILES+=usr/share/terminfo/b/bg3.10nv
OLD_FILES+=usr/share/terminfo/b/bg3.10rv
OLD_FILES+=usr/share/terminfo/b/bh3m
OLD_FILES+=usr/share/terminfo/b/bh4
OLD_FILES+=usr/share/terminfo/b/bitgraph
OLD_FILES+=usr/share/terminfo/b/blit
OLD_FILES+=usr/share/terminfo/b/bobcat
OLD_FILES+=usr/share/terminfo/b/bq300
OLD_FILES+=usr/share/terminfo/b/bq300-8
OLD_FILES+=usr/share/terminfo/b/bq300-8-pc
OLD_FILES+=usr/share/terminfo/b/bq300-8-pc-rv
OLD_FILES+=usr/share/terminfo/b/bq300-8-pc-w
OLD_FILES+=usr/share/terminfo/b/bq300-8-pc-w-rv
OLD_FILES+=usr/share/terminfo/b/bq300-8rv
OLD_FILES+=usr/share/terminfo/b/bq300-8w
OLD_FILES+=usr/share/terminfo/b/bq300-pc
OLD_FILES+=usr/share/terminfo/b/bq300-pc-rv
OLD_FILES+=usr/share/terminfo/b/bq300-pc-w
OLD_FILES+=usr/share/terminfo/b/bq300-pc-w-rv
OLD_FILES+=usr/share/terminfo/b/bq300-rv
OLD_FILES+=usr/share/terminfo/b/bq300-w
OLD_FILES+=usr/share/terminfo/b/bq300-w-8rv
OLD_FILES+=usr/share/terminfo/b/bq300-w-rv
OLD_FILES+=usr/share/terminfo/b/bsdos-pc
OLD_FILES+=usr/share/terminfo/b/bsdos-pc-m
OLD_FILES+=usr/share/terminfo/b/bsdos-pc-mono
OLD_FILES+=usr/share/terminfo/b/bsdos-pc-nobold
OLD_FILES+=usr/share/terminfo/b/bsdos-ppc
OLD_FILES+=usr/share/terminfo/b/bsdos-sparc
OLD_FILES+=usr/share/terminfo/b/bterm
OLD_DIRS+=usr/share/terminfo/b/
OLD_FILES+=usr/share/terminfo/c/c100
OLD_FILES+=usr/share/terminfo/c/c100-1p
OLD_FILES+=usr/share/terminfo/c/c100-4p
OLD_FILES+=usr/share/terminfo/c/c100-rv
OLD_FILES+=usr/share/terminfo/c/c100-rv-4p
OLD_FILES+=usr/share/terminfo/c/c104
OLD_FILES+=usr/share/terminfo/c/c108
OLD_FILES+=usr/share/terminfo/c/c108-4p
OLD_FILES+=usr/share/terminfo/c/c108-8p
OLD_FILES+=usr/share/terminfo/c/c108-rv
OLD_FILES+=usr/share/terminfo/c/c108-rv-4p
OLD_FILES+=usr/share/terminfo/c/c108-rv-8p
OLD_FILES+=usr/share/terminfo/c/c108-w
OLD_FILES+=usr/share/terminfo/c/c108-w-8p
OLD_FILES+=usr/share/terminfo/c/c300
OLD_FILES+=usr/share/terminfo/c/c301
OLD_FILES+=usr/share/terminfo/c/c321
OLD_FILES+=usr/share/terminfo/c/ca22851
OLD_FILES+=usr/share/terminfo/c/cad68-2
OLD_FILES+=usr/share/terminfo/c/cad68-3
OLD_FILES+=usr/share/terminfo/c/cbblit
OLD_FILES+=usr/share/terminfo/c/cbunix
OLD_FILES+=usr/share/terminfo/c/cci
OLD_FILES+=usr/share/terminfo/c/cci1
OLD_FILES+=usr/share/terminfo/c/cdc456
OLD_FILES+=usr/share/terminfo/c/cdc721
OLD_FILES+=usr/share/terminfo/c/cdc721-esc
OLD_FILES+=usr/share/terminfo/c/cdc721ll
OLD_FILES+=usr/share/terminfo/c/cdc752
OLD_FILES+=usr/share/terminfo/c/cdc756
OLD_FILES+=usr/share/terminfo/c/cg7900
OLD_FILES+=usr/share/terminfo/c/cgc2
OLD_FILES+=usr/share/terminfo/c/cgc3
OLD_FILES+=usr/share/terminfo/c/chromatics
OLD_FILES+=usr/share/terminfo/c/ci8510
OLD_FILES+=usr/share/terminfo/c/cit-80
OLD_FILES+=usr/share/terminfo/c/cit101
OLD_FILES+=usr/share/terminfo/c/cit101e
OLD_FILES+=usr/share/terminfo/c/cit101e-132
OLD_FILES+=usr/share/terminfo/c/cit101e-n
OLD_FILES+=usr/share/terminfo/c/cit101e-n132
OLD_FILES+=usr/share/terminfo/c/cit101e-rv
OLD_FILES+=usr/share/terminfo/c/cit500
OLD_FILES+=usr/share/terminfo/c/cit80
OLD_FILES+=usr/share/terminfo/c/citc
OLD_FILES+=usr/share/terminfo/c/citoh
OLD_FILES+=usr/share/terminfo/c/citoh-6lpi
OLD_FILES+=usr/share/terminfo/c/citoh-8lpi
OLD_FILES+=usr/share/terminfo/c/citoh-comp
OLD_FILES+=usr/share/terminfo/c/citoh-elite
OLD_FILES+=usr/share/terminfo/c/citoh-pica
OLD_FILES+=usr/share/terminfo/c/citoh-prop
OLD_FILES+=usr/share/terminfo/c/citoh-ps
OLD_FILES+=usr/share/terminfo/c/coco3
OLD_FILES+=usr/share/terminfo/c/coherent
OLD_FILES+=usr/share/terminfo/c/color_xterm
OLD_FILES+=usr/share/terminfo/c/colorscan
OLD_FILES+=usr/share/terminfo/c/commodore
OLD_FILES+=usr/share/terminfo/c/concept
OLD_FILES+=usr/share/terminfo/c/concept-avt
OLD_FILES+=usr/share/terminfo/c/concept100
OLD_FILES+=usr/share/terminfo/c/concept100-rv
OLD_FILES+=usr/share/terminfo/c/concept108
OLD_FILES+=usr/share/terminfo/c/concept108-4p
OLD_FILES+=usr/share/terminfo/c/concept108-8p
OLD_FILES+=usr/share/terminfo/c/concept108-w-8
OLD_FILES+=usr/share/terminfo/c/concept108-w8p
OLD_FILES+=usr/share/terminfo/c/concept108rv4p
OLD_FILES+=usr/share/terminfo/c/cons25
OLD_FILES+=usr/share/terminfo/c/cons25-debian
OLD_FILES+=usr/share/terminfo/c/cons25-iso-m
OLD_FILES+=usr/share/terminfo/c/cons25-iso8859
OLD_FILES+=usr/share/terminfo/c/cons25-koi8-r
OLD_FILES+=usr/share/terminfo/c/cons25-koi8r-m
OLD_FILES+=usr/share/terminfo/c/cons25-m
OLD_FILES+=usr/share/terminfo/c/cons25l1
OLD_FILES+=usr/share/terminfo/c/cons25l1-m
OLD_FILES+=usr/share/terminfo/c/cons25r
OLD_FILES+=usr/share/terminfo/c/cons25r-m
OLD_FILES+=usr/share/terminfo/c/cons25w
OLD_FILES+=usr/share/terminfo/c/cons30
OLD_FILES+=usr/share/terminfo/c/cons30-m
OLD_FILES+=usr/share/terminfo/c/cons43
OLD_FILES+=usr/share/terminfo/c/cons43-m
OLD_FILES+=usr/share/terminfo/c/cons50
OLD_FILES+=usr/share/terminfo/c/cons50-iso-m
OLD_FILES+=usr/share/terminfo/c/cons50-iso8859
OLD_FILES+=usr/share/terminfo/c/cons50-koi8r
OLD_FILES+=usr/share/terminfo/c/cons50-koi8r-m
OLD_FILES+=usr/share/terminfo/c/cons50-m
OLD_FILES+=usr/share/terminfo/c/cons50l1
OLD_FILES+=usr/share/terminfo/c/cons50l1-m
OLD_FILES+=usr/share/terminfo/c/cons50r
OLD_FILES+=usr/share/terminfo/c/cons50r-m
OLD_FILES+=usr/share/terminfo/c/cons60
OLD_FILES+=usr/share/terminfo/c/cons60-iso
OLD_FILES+=usr/share/terminfo/c/cons60-iso-m
OLD_FILES+=usr/share/terminfo/c/cons60-koi8r
OLD_FILES+=usr/share/terminfo/c/cons60-koi8r-m
OLD_FILES+=usr/share/terminfo/c/cons60-m
OLD_FILES+=usr/share/terminfo/c/cons60l1
OLD_FILES+=usr/share/terminfo/c/cons60l1-m
OLD_FILES+=usr/share/terminfo/c/cons60r
OLD_FILES+=usr/share/terminfo/c/cons60r-m
OLD_FILES+=usr/share/terminfo/c/contel300
OLD_FILES+=usr/share/terminfo/c/contel301
OLD_FILES+=usr/share/terminfo/c/contel320
OLD_FILES+=usr/share/terminfo/c/contel321
OLD_FILES+=usr/share/terminfo/c/cops
OLD_FILES+=usr/share/terminfo/c/cops-10
OLD_FILES+=usr/share/terminfo/c/cops10
OLD_FILES+=usr/share/terminfo/c/crt
OLD_FILES+=usr/share/terminfo/c/crt-vt220
OLD_FILES+=usr/share/terminfo/c/cs10
OLD_FILES+=usr/share/terminfo/c/cs10-w
OLD_FILES+=usr/share/terminfo/c/ct82
OLD_FILES+=usr/share/terminfo/c/ct8500
OLD_FILES+=usr/share/terminfo/c/ctrm
OLD_FILES+=usr/share/terminfo/c/cx
OLD_FILES+=usr/share/terminfo/c/cx100
OLD_FILES+=usr/share/terminfo/c/cyb110
OLD_FILES+=usr/share/terminfo/c/cyb83
OLD_FILES+=usr/share/terminfo/c/cygwin
OLD_FILES+=usr/share/terminfo/c/cygwinB19
OLD_FILES+=usr/share/terminfo/c/cygwinDBG
OLD_DIRS+=usr/share/terminfo/c/
OLD_FILES+=usr/share/terminfo/d/d132
OLD_FILES+=usr/share/terminfo/d/d2
OLD_FILES+=usr/share/terminfo/d/d2-dg
OLD_FILES+=usr/share/terminfo/d/d200
OLD_FILES+=usr/share/terminfo/d/d200-dg
OLD_FILES+=usr/share/terminfo/d/d210
OLD_FILES+=usr/share/terminfo/d/d210-dg
OLD_FILES+=usr/share/terminfo/d/d211
OLD_FILES+=usr/share/terminfo/d/d211-7b
OLD_FILES+=usr/share/terminfo/d/d211-dg
OLD_FILES+=usr/share/terminfo/d/d214
OLD_FILES+=usr/share/terminfo/d/d214-dg
OLD_FILES+=usr/share/terminfo/d/d215
OLD_FILES+=usr/share/terminfo/d/d215-7b
OLD_FILES+=usr/share/terminfo/d/d215-dg
OLD_FILES+=usr/share/terminfo/d/d216+
OLD_FILES+=usr/share/terminfo/d/d216+25
OLD_FILES+=usr/share/terminfo/d/d216+dg
OLD_FILES+=usr/share/terminfo/d/d216-dg
OLD_FILES+=usr/share/terminfo/d/d216-unix
OLD_FILES+=usr/share/terminfo/d/d216-unix-25
OLD_FILES+=usr/share/terminfo/d/d216e+
OLD_FILES+=usr/share/terminfo/d/d216e+dg
OLD_FILES+=usr/share/terminfo/d/d216e-dg
OLD_FILES+=usr/share/terminfo/d/d216e-unix
OLD_FILES+=usr/share/terminfo/d/d217-dg
OLD_FILES+=usr/share/terminfo/d/d217-unix
OLD_FILES+=usr/share/terminfo/d/d217-unix-25
OLD_FILES+=usr/share/terminfo/d/d220
OLD_FILES+=usr/share/terminfo/d/d220-7b
OLD_FILES+=usr/share/terminfo/d/d220-dg
OLD_FILES+=usr/share/terminfo/d/d230
OLD_FILES+=usr/share/terminfo/d/d230-dg
OLD_FILES+=usr/share/terminfo/d/d230c
OLD_FILES+=usr/share/terminfo/d/d230c-dg
OLD_FILES+=usr/share/terminfo/d/d400
OLD_FILES+=usr/share/terminfo/d/d400-dg
OLD_FILES+=usr/share/terminfo/d/d410
OLD_FILES+=usr/share/terminfo/d/d410-7b
OLD_FILES+=usr/share/terminfo/d/d410-7b-w
OLD_FILES+=usr/share/terminfo/d/d410-dg
OLD_FILES+=usr/share/terminfo/d/d410-w
OLD_FILES+=usr/share/terminfo/d/d411
OLD_FILES+=usr/share/terminfo/d/d411-7b
OLD_FILES+=usr/share/terminfo/d/d411-7b-w
OLD_FILES+=usr/share/terminfo/d/d411-dg
OLD_FILES+=usr/share/terminfo/d/d411-w
OLD_FILES+=usr/share/terminfo/d/d412+
OLD_FILES+=usr/share/terminfo/d/d412+25
OLD_FILES+=usr/share/terminfo/d/d412+dg
OLD_FILES+=usr/share/terminfo/d/d412+s
OLD_FILES+=usr/share/terminfo/d/d412+sr
OLD_FILES+=usr/share/terminfo/d/d412+w
OLD_FILES+=usr/share/terminfo/d/d412-dg
OLD_FILES+=usr/share/terminfo/d/d412-unix
OLD_FILES+=usr/share/terminfo/d/d412-unix-25
OLD_FILES+=usr/share/terminfo/d/d412-unix-s
OLD_FILES+=usr/share/terminfo/d/d412-unix-sr
OLD_FILES+=usr/share/terminfo/d/d412-unix-w
OLD_FILES+=usr/share/terminfo/d/d413-dg
OLD_FILES+=usr/share/terminfo/d/d413-unix
OLD_FILES+=usr/share/terminfo/d/d413-unix-25
OLD_FILES+=usr/share/terminfo/d/d413-unix-s
OLD_FILES+=usr/share/terminfo/d/d413-unix-sr
OLD_FILES+=usr/share/terminfo/d/d413-unix-w
OLD_FILES+=usr/share/terminfo/d/d414-unix
OLD_FILES+=usr/share/terminfo/d/d414-unix-25
OLD_FILES+=usr/share/terminfo/d/d414-unix-s
OLD_FILES+=usr/share/terminfo/d/d414-unix-sr
OLD_FILES+=usr/share/terminfo/d/d414-unix-w
OLD_FILES+=usr/share/terminfo/d/d430-dg
OLD_FILES+=usr/share/terminfo/d/d430-dg-ccc
OLD_FILES+=usr/share/terminfo/d/d430-unix
OLD_FILES+=usr/share/terminfo/d/d430-unix-25
OLD_FILES+=usr/share/terminfo/d/d430-unix-25-ccc
OLD_FILES+=usr/share/terminfo/d/d430-unix-ccc
OLD_FILES+=usr/share/terminfo/d/d430-unix-s
OLD_FILES+=usr/share/terminfo/d/d430-unix-s-ccc
OLD_FILES+=usr/share/terminfo/d/d430-unix-sr
OLD_FILES+=usr/share/terminfo/d/d430-unix-sr-ccc
OLD_FILES+=usr/share/terminfo/d/d430-unix-w
OLD_FILES+=usr/share/terminfo/d/d430-unix-w-ccc
OLD_FILES+=usr/share/terminfo/d/d430c-dg
OLD_FILES+=usr/share/terminfo/d/d430c-dg-ccc
OLD_FILES+=usr/share/terminfo/d/d430c-unix
OLD_FILES+=usr/share/terminfo/d/d430c-unix-25
OLD_FILES+=usr/share/terminfo/d/d430c-unix-25-ccc
OLD_FILES+=usr/share/terminfo/d/d430c-unix-ccc
OLD_FILES+=usr/share/terminfo/d/d430c-unix-s
OLD_FILES+=usr/share/terminfo/d/d430c-unix-s-ccc
OLD_FILES+=usr/share/terminfo/d/d430c-unix-sr
OLD_FILES+=usr/share/terminfo/d/d430c-unix-sr-ccc
OLD_FILES+=usr/share/terminfo/d/d430c-unix-w
OLD_FILES+=usr/share/terminfo/d/d430c-unix-w-ccc
OLD_FILES+=usr/share/terminfo/d/d450
OLD_FILES+=usr/share/terminfo/d/d450-dg
OLD_FILES+=usr/share/terminfo/d/d460
OLD_FILES+=usr/share/terminfo/d/d460-7b
OLD_FILES+=usr/share/terminfo/d/d460-7b-w
OLD_FILES+=usr/share/terminfo/d/d460-dg
OLD_FILES+=usr/share/terminfo/d/d460-w
OLD_FILES+=usr/share/terminfo/d/d461
OLD_FILES+=usr/share/terminfo/d/d461-7b
OLD_FILES+=usr/share/terminfo/d/d461-7b-w
OLD_FILES+=usr/share/terminfo/d/d461-dg
OLD_FILES+=usr/share/terminfo/d/d461-w
OLD_FILES+=usr/share/terminfo/d/d462+
OLD_FILES+=usr/share/terminfo/d/d462+25
OLD_FILES+=usr/share/terminfo/d/d462+dg
OLD_FILES+=usr/share/terminfo/d/d462+s
OLD_FILES+=usr/share/terminfo/d/d462+sr
OLD_FILES+=usr/share/terminfo/d/d462+w
OLD_FILES+=usr/share/terminfo/d/d462-dg
OLD_FILES+=usr/share/terminfo/d/d462-unix
OLD_FILES+=usr/share/terminfo/d/d462-unix-25
OLD_FILES+=usr/share/terminfo/d/d462-unix-s
OLD_FILES+=usr/share/terminfo/d/d462-unix-sr
OLD_FILES+=usr/share/terminfo/d/d462-unix-w
OLD_FILES+=usr/share/terminfo/d/d462e-dg
OLD_FILES+=usr/share/terminfo/d/d463-dg
OLD_FILES+=usr/share/terminfo/d/d463-unix
OLD_FILES+=usr/share/terminfo/d/d463-unix-25
OLD_FILES+=usr/share/terminfo/d/d463-unix-s
OLD_FILES+=usr/share/terminfo/d/d463-unix-sr
OLD_FILES+=usr/share/terminfo/d/d463-unix-w
OLD_FILES+=usr/share/terminfo/d/d464-unix
OLD_FILES+=usr/share/terminfo/d/d464-unix-25
OLD_FILES+=usr/share/terminfo/d/d464-unix-s
OLD_FILES+=usr/share/terminfo/d/d464-unix-sr
OLD_FILES+=usr/share/terminfo/d/d464-unix-w
OLD_FILES+=usr/share/terminfo/d/d470
OLD_FILES+=usr/share/terminfo/d/d470-7b
OLD_FILES+=usr/share/terminfo/d/d470-dg
OLD_FILES+=usr/share/terminfo/d/d470c
OLD_FILES+=usr/share/terminfo/d/d470c-7b
OLD_FILES+=usr/share/terminfo/d/d470c-dg
OLD_FILES+=usr/share/terminfo/d/d555
OLD_FILES+=usr/share/terminfo/d/d555-7b
OLD_FILES+=usr/share/terminfo/d/d555-7b-w
OLD_FILES+=usr/share/terminfo/d/d555-dg
OLD_FILES+=usr/share/terminfo/d/d555-w
OLD_FILES+=usr/share/terminfo/d/d577
OLD_FILES+=usr/share/terminfo/d/d577-7b
OLD_FILES+=usr/share/terminfo/d/d577-7b-w
OLD_FILES+=usr/share/terminfo/d/d577-dg
OLD_FILES+=usr/share/terminfo/d/d577-w
OLD_FILES+=usr/share/terminfo/d/d578
OLD_FILES+=usr/share/terminfo/d/d578-7b
OLD_FILES+=usr/share/terminfo/d/d578-dg
OLD_FILES+=usr/share/terminfo/d/d80
OLD_FILES+=usr/share/terminfo/d/d800
OLD_FILES+=usr/share/terminfo/d/darwin
OLD_FILES+=usr/share/terminfo/d/darwin-100x37
OLD_FILES+=usr/share/terminfo/d/darwin-100x37-m
OLD_FILES+=usr/share/terminfo/d/darwin-112x37
OLD_FILES+=usr/share/terminfo/d/darwin-112x37-m
OLD_FILES+=usr/share/terminfo/d/darwin-128x40
OLD_FILES+=usr/share/terminfo/d/darwin-128x40-m
OLD_FILES+=usr/share/terminfo/d/darwin-128x48
OLD_FILES+=usr/share/terminfo/d/darwin-128x48-m
OLD_FILES+=usr/share/terminfo/d/darwin-144x48
OLD_FILES+=usr/share/terminfo/d/darwin-144x48-m
OLD_FILES+=usr/share/terminfo/d/darwin-160x64
OLD_FILES+=usr/share/terminfo/d/darwin-160x64-m
OLD_FILES+=usr/share/terminfo/d/darwin-200x64
OLD_FILES+=usr/share/terminfo/d/darwin-200x64-m
OLD_FILES+=usr/share/terminfo/d/darwin-200x75
OLD_FILES+=usr/share/terminfo/d/darwin-200x75-m
OLD_FILES+=usr/share/terminfo/d/darwin-256x96
OLD_FILES+=usr/share/terminfo/d/darwin-256x96-m
OLD_FILES+=usr/share/terminfo/d/darwin-80x25
OLD_FILES+=usr/share/terminfo/d/darwin-80x25-m
OLD_FILES+=usr/share/terminfo/d/darwin-80x30
OLD_FILES+=usr/share/terminfo/d/darwin-80x30-m
OLD_FILES+=usr/share/terminfo/d/darwin-90x30
OLD_FILES+=usr/share/terminfo/d/darwin-90x30-m
OLD_FILES+=usr/share/terminfo/d/darwin-b
OLD_FILES+=usr/share/terminfo/d/darwin-f
OLD_FILES+=usr/share/terminfo/d/darwin-f2
OLD_FILES+=usr/share/terminfo/d/darwin-m
OLD_FILES+=usr/share/terminfo/d/darwin-m-b
OLD_FILES+=usr/share/terminfo/d/darwin-m-f
OLD_FILES+=usr/share/terminfo/d/darwin-m-f2
OLD_FILES+=usr/share/terminfo/d/datagraphix
OLD_FILES+=usr/share/terminfo/d/datamedia2500
OLD_FILES+=usr/share/terminfo/d/datapoint
OLD_FILES+=usr/share/terminfo/d/dataspeed40
OLD_FILES+=usr/share/terminfo/d/dd5000
OLD_FILES+=usr/share/terminfo/d/ddr
OLD_FILES+=usr/share/terminfo/d/ddr3180
OLD_FILES+=usr/share/terminfo/d/dec+pp
OLD_FILES+=usr/share/terminfo/d/dec+sl
OLD_FILES+=usr/share/terminfo/d/dec-vt100
OLD_FILES+=usr/share/terminfo/d/dec-vt220
OLD_FILES+=usr/share/terminfo/d/dec-vt330
OLD_FILES+=usr/share/terminfo/d/dec-vt340
OLD_FILES+=usr/share/terminfo/d/dec-vt400
OLD_FILES+=usr/share/terminfo/d/decansi
OLD_FILES+=usr/share/terminfo/d/decpro
OLD_FILES+=usr/share/terminfo/d/decwriter
OLD_FILES+=usr/share/terminfo/d/delta
OLD_FILES+=usr/share/terminfo/d/dg+ccc
OLD_FILES+=usr/share/terminfo/d/dg+color
OLD_FILES+=usr/share/terminfo/d/dg+color8
OLD_FILES+=usr/share/terminfo/d/dg+fixed
OLD_FILES+=usr/share/terminfo/d/dg-ansi
OLD_FILES+=usr/share/terminfo/d/dg-generic
OLD_FILES+=usr/share/terminfo/d/dg100
OLD_FILES+=usr/share/terminfo/d/dg200
OLD_FILES+=usr/share/terminfo/d/dg210
OLD_FILES+=usr/share/terminfo/d/dg211
OLD_FILES+=usr/share/terminfo/d/dg450
OLD_FILES+=usr/share/terminfo/d/dg460-ansi
OLD_FILES+=usr/share/terminfo/d/dg6053
OLD_FILES+=usr/share/terminfo/d/dg6053-old
OLD_FILES+=usr/share/terminfo/d/dg605x
OLD_FILES+=usr/share/terminfo/d/dg6134
OLD_FILES+=usr/share/terminfo/d/dgkeys+11
OLD_FILES+=usr/share/terminfo/d/dgkeys+15
OLD_FILES+=usr/share/terminfo/d/dgkeys+7b
OLD_FILES+=usr/share/terminfo/d/dgkeys+8b
OLD_FILES+=usr/share/terminfo/d/dgmode+color
OLD_FILES+=usr/share/terminfo/d/dgmode+color8
OLD_FILES+=usr/share/terminfo/d/dgunix+ccc
OLD_FILES+=usr/share/terminfo/d/dgunix+fixed
OLD_FILES+=usr/share/terminfo/d/diablo
OLD_FILES+=usr/share/terminfo/d/diablo-lm
OLD_FILES+=usr/share/terminfo/d/diablo1620
OLD_FILES+=usr/share/terminfo/d/diablo1620-m8
OLD_FILES+=usr/share/terminfo/d/diablo1640
OLD_FILES+=usr/share/terminfo/d/diablo1640-lm
OLD_FILES+=usr/share/terminfo/d/diablo1640-m8
OLD_FILES+=usr/share/terminfo/d/diablo1720
OLD_FILES+=usr/share/terminfo/d/diablo1730
OLD_FILES+=usr/share/terminfo/d/diablo1740
OLD_FILES+=usr/share/terminfo/d/diablo1740-lm
OLD_FILES+=usr/share/terminfo/d/diablo450
OLD_FILES+=usr/share/terminfo/d/diablo630
OLD_FILES+=usr/share/terminfo/d/dialogue
OLD_FILES+=usr/share/terminfo/d/dialogue80
OLD_FILES+=usr/share/terminfo/d/digilog
OLD_FILES+=usr/share/terminfo/d/djgpp
OLD_FILES+=usr/share/terminfo/d/djgpp203
OLD_FILES+=usr/share/terminfo/d/djgpp204
OLD_FILES+=usr/share/terminfo/d/dku7003
OLD_FILES+=usr/share/terminfo/d/dku7003-dumb
OLD_FILES+=usr/share/terminfo/d/dku7102
OLD_FILES+=usr/share/terminfo/d/dku7102-old
OLD_FILES+=usr/share/terminfo/d/dku7102-sna
OLD_FILES+=usr/share/terminfo/d/dku7103-sna
OLD_FILES+=usr/share/terminfo/d/dku7202
OLD_FILES+=usr/share/terminfo/d/dm1520
OLD_FILES+=usr/share/terminfo/d/dm1521
OLD_FILES+=usr/share/terminfo/d/dm2500
OLD_FILES+=usr/share/terminfo/d/dm3025
OLD_FILES+=usr/share/terminfo/d/dm3045
OLD_FILES+=usr/share/terminfo/d/dm80
OLD_FILES+=usr/share/terminfo/d/dm80w
OLD_FILES+=usr/share/terminfo/d/dmchat
OLD_FILES+=usr/share/terminfo/d/dmd
OLD_FILES+=usr/share/terminfo/d/dmd-24
OLD_FILES+=usr/share/terminfo/d/dmd-34
OLD_FILES+=usr/share/terminfo/d/dmd1
OLD_FILES+=usr/share/terminfo/d/dmdt80
OLD_FILES+=usr/share/terminfo/d/dmdt80w
OLD_FILES+=usr/share/terminfo/d/dmterm
OLD_FILES+=usr/share/terminfo/d/domterm
OLD_FILES+=usr/share/terminfo/d/dp3360
OLD_FILES+=usr/share/terminfo/d/dp8242
OLD_FILES+=usr/share/terminfo/d/ds40
OLD_FILES+=usr/share/terminfo/d/ds40-2
OLD_FILES+=usr/share/terminfo/d/dt-100
OLD_FILES+=usr/share/terminfo/d/dt-100w
OLD_FILES+=usr/share/terminfo/d/dt100
OLD_FILES+=usr/share/terminfo/d/dt100w
OLD_FILES+=usr/share/terminfo/d/dt110
OLD_FILES+=usr/share/terminfo/d/dt80
OLD_FILES+=usr/share/terminfo/d/dt80-sas
OLD_FILES+=usr/share/terminfo/d/dt80w
OLD_FILES+=usr/share/terminfo/d/dtc300s
OLD_FILES+=usr/share/terminfo/d/dtc382
OLD_FILES+=usr/share/terminfo/d/dtterm
OLD_FILES+=usr/share/terminfo/d/dumb
OLD_FILES+=usr/share/terminfo/d/dumb-emacs-ansi
OLD_FILES+=usr/share/terminfo/d/dvtm
OLD_FILES+=usr/share/terminfo/d/dvtm-256color
OLD_FILES+=usr/share/terminfo/d/dw
OLD_FILES+=usr/share/terminfo/d/dw1
OLD_FILES+=usr/share/terminfo/d/dw2
OLD_FILES+=usr/share/terminfo/d/dw3
OLD_FILES+=usr/share/terminfo/d/dw4
OLD_FILES+=usr/share/terminfo/d/dwk
OLD_FILES+=usr/share/terminfo/d/dwk-vt
OLD_DIRS+=usr/share/terminfo/d/
OLD_FILES+=usr/share/terminfo/e/ecma+color
OLD_FILES+=usr/share/terminfo/e/ecma+index
OLD_FILES+=usr/share/terminfo/e/ecma+italics
OLD_FILES+=usr/share/terminfo/e/ecma+sgr
OLD_FILES+=usr/share/terminfo/e/ecma+strikeout
OLD_FILES+=usr/share/terminfo/e/elks
OLD_FILES+=usr/share/terminfo/e/elks-ansi
OLD_FILES+=usr/share/terminfo/e/elks-glasstty
OLD_FILES+=usr/share/terminfo/e/elks-vt52
OLD_FILES+=usr/share/terminfo/e/emots
OLD_FILES+=usr/share/terminfo/e/emu
OLD_FILES+=usr/share/terminfo/e/emu-220
OLD_FILES+=usr/share/terminfo/e/emx-base
OLD_FILES+=usr/share/terminfo/e/env230
OLD_FILES+=usr/share/terminfo/e/envision230
OLD_FILES+=usr/share/terminfo/e/ep40
OLD_FILES+=usr/share/terminfo/e/ep4000
OLD_FILES+=usr/share/terminfo/e/ep4080
OLD_FILES+=usr/share/terminfo/e/ep48
OLD_FILES+=usr/share/terminfo/e/ergo4000
OLD_FILES+=usr/share/terminfo/e/esprit
OLD_FILES+=usr/share/terminfo/e/esprit-am
OLD_FILES+=usr/share/terminfo/e/eterm
OLD_FILES+=usr/share/terminfo/e/eterm-color
OLD_FILES+=usr/share/terminfo/e/ex155
OLD_FILES+=usr/share/terminfo/e/excel62
OLD_FILES+=usr/share/terminfo/e/excel62-rv
OLD_FILES+=usr/share/terminfo/e/excel62-w
OLD_FILES+=usr/share/terminfo/e/excel64
OLD_FILES+=usr/share/terminfo/e/excel64-rv
OLD_FILES+=usr/share/terminfo/e/excel64-w
OLD_FILES+=usr/share/terminfo/e/exec80
OLD_DIRS+=usr/share/terminfo/e/
OLD_FILES+=usr/share/terminfo/f/f100
OLD_FILES+=usr/share/terminfo/f/f100-rv
OLD_FILES+=usr/share/terminfo/f/f110
OLD_FILES+=usr/share/terminfo/f/f110-14
OLD_FILES+=usr/share/terminfo/f/f110-14w
OLD_FILES+=usr/share/terminfo/f/f110-w
OLD_FILES+=usr/share/terminfo/f/f1720
OLD_FILES+=usr/share/terminfo/f/f1720a
OLD_FILES+=usr/share/terminfo/f/f200
OLD_FILES+=usr/share/terminfo/f/f200-w
OLD_FILES+=usr/share/terminfo/f/f200vi
OLD_FILES+=usr/share/terminfo/f/f200vi-w
OLD_FILES+=usr/share/terminfo/f/falco
OLD_FILES+=usr/share/terminfo/f/falco-p
OLD_FILES+=usr/share/terminfo/f/fbterm
OLD_FILES+=usr/share/terminfo/f/fenix
OLD_FILES+=usr/share/terminfo/f/fenixw
OLD_FILES+=usr/share/terminfo/f/fixterm
OLD_FILES+=usr/share/terminfo/f/fortune
OLD_FILES+=usr/share/terminfo/f/fos
OLD_FILES+=usr/share/terminfo/f/fox
OLD_FILES+=usr/share/terminfo/f/freedom
OLD_FILES+=usr/share/terminfo/f/freedom-rv
OLD_FILES+=usr/share/terminfo/f/freedom100
OLD_FILES+=usr/share/terminfo/f/freedom110
OLD_FILES+=usr/share/terminfo/f/freedom200
OLD_DIRS+=usr/share/terminfo/f/
OLD_FILES+=usr/share/terminfo/g/gator
OLD_FILES+=usr/share/terminfo/g/gator-52
OLD_FILES+=usr/share/terminfo/g/gator-52t
OLD_FILES+=usr/share/terminfo/g/gator-t
OLD_FILES+=usr/share/terminfo/g/gigi
OLD_FILES+=usr/share/terminfo/g/glasstty
OLD_FILES+=usr/share/terminfo/g/gnome
OLD_FILES+=usr/share/terminfo/g/gnome+pcfkeys
OLD_FILES+=usr/share/terminfo/g/gnome-2007
OLD_FILES+=usr/share/terminfo/g/gnome-2008
OLD_FILES+=usr/share/terminfo/g/gnome-2012
OLD_FILES+=usr/share/terminfo/g/gnome-256color
OLD_FILES+=usr/share/terminfo/g/gnome-fc5
OLD_FILES+=usr/share/terminfo/g/gnome-rh62
OLD_FILES+=usr/share/terminfo/g/gnome-rh72
OLD_FILES+=usr/share/terminfo/g/gnome-rh80
OLD_FILES+=usr/share/terminfo/g/gnome-rh90
OLD_FILES+=usr/share/terminfo/g/go-225
OLD_FILES+=usr/share/terminfo/g/go140
OLD_FILES+=usr/share/terminfo/g/go140w
OLD_FILES+=usr/share/terminfo/g/go225
OLD_FILES+=usr/share/terminfo/g/graphos
OLD_FILES+=usr/share/terminfo/g/graphos-30
OLD_FILES+=usr/share/terminfo/g/gs5430
OLD_FILES+=usr/share/terminfo/g/gs5430-22
OLD_FILES+=usr/share/terminfo/g/gs5430-24
OLD_FILES+=usr/share/terminfo/g/gs6300
OLD_FILES+=usr/share/terminfo/g/gsi
OLD_FILES+=usr/share/terminfo/g/gt100
OLD_FILES+=usr/share/terminfo/g/gt100a
OLD_FILES+=usr/share/terminfo/g/gt40
OLD_FILES+=usr/share/terminfo/g/gt42
OLD_FILES+=usr/share/terminfo/g/guru
OLD_FILES+=usr/share/terminfo/g/guru+rv
OLD_FILES+=usr/share/terminfo/g/guru+s
OLD_FILES+=usr/share/terminfo/g/guru+unk
OLD_FILES+=usr/share/terminfo/g/guru-24
OLD_FILES+=usr/share/terminfo/g/guru-33
OLD_FILES+=usr/share/terminfo/g/guru-33-rv
OLD_FILES+=usr/share/terminfo/g/guru-33-s
OLD_FILES+=usr/share/terminfo/g/guru-44
OLD_FILES+=usr/share/terminfo/g/guru-44-s
OLD_FILES+=usr/share/terminfo/g/guru-76
OLD_FILES+=usr/share/terminfo/g/guru-76-lp
OLD_FILES+=usr/share/terminfo/g/guru-76-s
OLD_FILES+=usr/share/terminfo/g/guru-76-w
OLD_FILES+=usr/share/terminfo/g/guru-76-w-s
OLD_FILES+=usr/share/terminfo/g/guru-76-wm
OLD_FILES+=usr/share/terminfo/g/guru-lp
OLD_FILES+=usr/share/terminfo/g/guru-nctxt
OLD_FILES+=usr/share/terminfo/g/guru-rv
OLD_FILES+=usr/share/terminfo/g/guru-s
OLD_DIRS+=usr/share/terminfo/g/
OLD_FILES+=usr/share/terminfo/h/h-100
OLD_FILES+=usr/share/terminfo/h/h-100bw
OLD_FILES+=usr/share/terminfo/h/h100
OLD_FILES+=usr/share/terminfo/h/h100bw
OLD_FILES+=usr/share/terminfo/h/h19
OLD_FILES+=usr/share/terminfo/h/h19-a
OLD_FILES+=usr/share/terminfo/h/h19-b
OLD_FILES+=usr/share/terminfo/h/h19-bs
OLD_FILES+=usr/share/terminfo/h/h19-g
OLD_FILES+=usr/share/terminfo/h/h19-smul
OLD_FILES+=usr/share/terminfo/h/h19-u
OLD_FILES+=usr/share/terminfo/h/h19-us
OLD_FILES+=usr/share/terminfo/h/h19a
OLD_FILES+=usr/share/terminfo/h/h19g
OLD_FILES+=usr/share/terminfo/h/h19k
OLD_FILES+=usr/share/terminfo/h/h19kermit
OLD_FILES+=usr/share/terminfo/h/h19us
OLD_FILES+=usr/share/terminfo/h/h29a-kc-bc
OLD_FILES+=usr/share/terminfo/h/h29a-kc-uc
OLD_FILES+=usr/share/terminfo/h/h29a-nkc-bc
OLD_FILES+=usr/share/terminfo/h/h29a-nkc-uc
OLD_FILES+=usr/share/terminfo/h/h80
OLD_FILES+=usr/share/terminfo/h/ha8675
OLD_FILES+=usr/share/terminfo/h/ha8686
OLD_FILES+=usr/share/terminfo/h/hazel
OLD_FILES+=usr/share/terminfo/h/hds200
OLD_FILES+=usr/share/terminfo/h/he80
OLD_FILES+=usr/share/terminfo/h/heath
OLD_FILES+=usr/share/terminfo/h/heath-19
OLD_FILES+=usr/share/terminfo/h/heath-ansi
OLD_FILES+=usr/share/terminfo/h/heathkit
OLD_FILES+=usr/share/terminfo/h/heathkit-a
OLD_FILES+=usr/share/terminfo/h/hft
OLD_FILES+=usr/share/terminfo/h/hft-c
OLD_FILES+=usr/share/terminfo/h/hft-c-old
OLD_FILES+=usr/share/terminfo/h/hft-old
OLD_FILES+=usr/share/terminfo/h/hirez100
OLD_FILES+=usr/share/terminfo/h/hirez100-w
OLD_FILES+=usr/share/terminfo/h/hmod1
OLD_FILES+=usr/share/terminfo/h/hp
OLD_FILES+=usr/share/terminfo/h/hp+arrows
OLD_FILES+=usr/share/terminfo/h/hp+color
OLD_FILES+=usr/share/terminfo/h/hp+labels
OLD_FILES+=usr/share/terminfo/h/hp+pfk+arrows
OLD_FILES+=usr/share/terminfo/h/hp+pfk+cr
OLD_FILES+=usr/share/terminfo/h/hp+pfk-cr
OLD_FILES+=usr/share/terminfo/h/hp+printer
OLD_FILES+=usr/share/terminfo/h/hp110
OLD_FILES+=usr/share/terminfo/h/hp150
OLD_FILES+=usr/share/terminfo/h/hp2
OLD_FILES+=usr/share/terminfo/h/hp236
OLD_FILES+=usr/share/terminfo/h/hp2382
OLD_FILES+=usr/share/terminfo/h/hp2382a
OLD_FILES+=usr/share/terminfo/h/hp2392
OLD_FILES+=usr/share/terminfo/h/hp2397
OLD_FILES+=usr/share/terminfo/h/hp2397a
OLD_FILES+=usr/share/terminfo/h/hp2621
OLD_FILES+=usr/share/terminfo/h/hp2621-48
OLD_FILES+=usr/share/terminfo/h/hp2621-a
OLD_FILES+=usr/share/terminfo/h/hp2621-ba
OLD_FILES+=usr/share/terminfo/h/hp2621-fl
OLD_FILES+=usr/share/terminfo/h/hp2621-k45
OLD_FILES+=usr/share/terminfo/h/hp2621-nl
OLD_FILES+=usr/share/terminfo/h/hp2621-nt
OLD_FILES+=usr/share/terminfo/h/hp2621-wl
OLD_FILES+=usr/share/terminfo/h/hp2621A
OLD_FILES+=usr/share/terminfo/h/hp2621a
OLD_FILES+=usr/share/terminfo/h/hp2621a-a
OLD_FILES+=usr/share/terminfo/h/hp2621b
OLD_FILES+=usr/share/terminfo/h/hp2621b-kx
OLD_FILES+=usr/share/terminfo/h/hp2621b-kx-p
OLD_FILES+=usr/share/terminfo/h/hp2621b-p
OLD_FILES+=usr/share/terminfo/h/hp2621k45
OLD_FILES+=usr/share/terminfo/h/hp2621p
OLD_FILES+=usr/share/terminfo/h/hp2621p-a
OLD_FILES+=usr/share/terminfo/h/hp2622
OLD_FILES+=usr/share/terminfo/h/hp2622a
OLD_FILES+=usr/share/terminfo/h/hp2623
OLD_FILES+=usr/share/terminfo/h/hp2623a
OLD_FILES+=usr/share/terminfo/h/hp2624
OLD_FILES+=usr/share/terminfo/h/hp2624-10p
OLD_FILES+=usr/share/terminfo/h/hp2624a
OLD_FILES+=usr/share/terminfo/h/hp2624a-10p
OLD_FILES+=usr/share/terminfo/h/hp2624b
OLD_FILES+=usr/share/terminfo/h/hp2624b-10p
OLD_FILES+=usr/share/terminfo/h/hp2624b-10p-p
OLD_FILES+=usr/share/terminfo/h/hp2624b-4p
OLD_FILES+=usr/share/terminfo/h/hp2624b-4p-p
OLD_FILES+=usr/share/terminfo/h/hp2624b-p
OLD_FILES+=usr/share/terminfo/h/hp2626
OLD_FILES+=usr/share/terminfo/h/hp2626-12
OLD_FILES+=usr/share/terminfo/h/hp2626-12-s
OLD_FILES+=usr/share/terminfo/h/hp2626-12x40
OLD_FILES+=usr/share/terminfo/h/hp2626-ns
OLD_FILES+=usr/share/terminfo/h/hp2626-s
OLD_FILES+=usr/share/terminfo/h/hp2626-x40
OLD_FILES+=usr/share/terminfo/h/hp2626a
OLD_FILES+=usr/share/terminfo/h/hp2626p
OLD_FILES+=usr/share/terminfo/h/hp2627a
OLD_FILES+=usr/share/terminfo/h/hp2627a-rev
OLD_FILES+=usr/share/terminfo/h/hp2627c
OLD_FILES+=usr/share/terminfo/h/hp262x
OLD_FILES+=usr/share/terminfo/h/hp2640a
OLD_FILES+=usr/share/terminfo/h/hp2640b
OLD_FILES+=usr/share/terminfo/h/hp2641a
OLD_FILES+=usr/share/terminfo/h/hp2644a
OLD_FILES+=usr/share/terminfo/h/hp2645
OLD_FILES+=usr/share/terminfo/h/hp2645a
OLD_FILES+=usr/share/terminfo/h/hp2647a
OLD_FILES+=usr/share/terminfo/h/hp2648
OLD_FILES+=usr/share/terminfo/h/hp2648a
OLD_FILES+=usr/share/terminfo/h/hp300h
OLD_FILES+=usr/share/terminfo/h/hp45
OLD_FILES+=usr/share/terminfo/h/hp700
OLD_FILES+=usr/share/terminfo/h/hp700-wy
OLD_FILES+=usr/share/terminfo/h/hp70092
OLD_FILES+=usr/share/terminfo/h/hp70092A
OLD_FILES+=usr/share/terminfo/h/hp70092a
OLD_FILES+=usr/share/terminfo/h/hp9837
OLD_FILES+=usr/share/terminfo/h/hp9845
OLD_FILES+=usr/share/terminfo/h/hp98550
OLD_FILES+=usr/share/terminfo/h/hp98550a
OLD_FILES+=usr/share/terminfo/h/hp98720
OLD_FILES+=usr/share/terminfo/h/hp98721
OLD_FILES+=usr/share/terminfo/h/hpansi
OLD_FILES+=usr/share/terminfo/h/hpex
OLD_FILES+=usr/share/terminfo/h/hpex2
OLD_FILES+=usr/share/terminfo/h/hpgeneric
OLD_FILES+=usr/share/terminfo/h/hpsub
OLD_FILES+=usr/share/terminfo/h/hpterm
OLD_FILES+=usr/share/terminfo/h/hpterm-color
OLD_FILES+=usr/share/terminfo/h/htx11
OLD_FILES+=usr/share/terminfo/h/hurd
OLD_FILES+=usr/share/terminfo/h/hz1000
OLD_FILES+=usr/share/terminfo/h/hz1420
OLD_FILES+=usr/share/terminfo/h/hz1500
OLD_FILES+=usr/share/terminfo/h/hz1510
OLD_FILES+=usr/share/terminfo/h/hz1520
OLD_FILES+=usr/share/terminfo/h/hz1520-noesc
OLD_FILES+=usr/share/terminfo/h/hz1552
OLD_FILES+=usr/share/terminfo/h/hz1552-rv
OLD_FILES+=usr/share/terminfo/h/hz2000
OLD_DIRS+=usr/share/terminfo/h/
OLD_FILES+=usr/share/terminfo/i/i100
OLD_FILES+=usr/share/terminfo/i/i3101
OLD_FILES+=usr/share/terminfo/i/i3164
OLD_FILES+=usr/share/terminfo/i/i400
OLD_FILES+=usr/share/terminfo/i/iTerm.app
OLD_FILES+=usr/share/terminfo/i/iTerm2.app
OLD_FILES+=usr/share/terminfo/i/ibcs2
OLD_FILES+=usr/share/terminfo/i/ibm+16color
OLD_FILES+=usr/share/terminfo/i/ibm+color
OLD_FILES+=usr/share/terminfo/i/ibm-apl
OLD_FILES+=usr/share/terminfo/i/ibm-pc
OLD_FILES+=usr/share/terminfo/i/ibm-system1
OLD_FILES+=usr/share/terminfo/i/ibm3101
OLD_FILES+=usr/share/terminfo/i/ibm3151
OLD_FILES+=usr/share/terminfo/i/ibm3161
OLD_FILES+=usr/share/terminfo/i/ibm3161-C
OLD_FILES+=usr/share/terminfo/i/ibm3162
OLD_FILES+=usr/share/terminfo/i/ibm3163
OLD_FILES+=usr/share/terminfo/i/ibm3164
OLD_FILES+=usr/share/terminfo/i/ibm327x
OLD_FILES+=usr/share/terminfo/i/ibm5051
OLD_FILES+=usr/share/terminfo/i/ibm5081
OLD_FILES+=usr/share/terminfo/i/ibm5081-c
OLD_FILES+=usr/share/terminfo/i/ibm5151
OLD_FILES+=usr/share/terminfo/i/ibm5154
OLD_FILES+=usr/share/terminfo/i/ibm5154-c
OLD_FILES+=usr/share/terminfo/i/ibm6153
OLD_FILES+=usr/share/terminfo/i/ibm6153-40
OLD_FILES+=usr/share/terminfo/i/ibm6153-90
OLD_FILES+=usr/share/terminfo/i/ibm6154
OLD_FILES+=usr/share/terminfo/i/ibm6154-c
OLD_FILES+=usr/share/terminfo/i/ibm6155
OLD_FILES+=usr/share/terminfo/i/ibm8503
OLD_FILES+=usr/share/terminfo/i/ibm8507
OLD_FILES+=usr/share/terminfo/i/ibm8512
OLD_FILES+=usr/share/terminfo/i/ibm8513
OLD_FILES+=usr/share/terminfo/i/ibm8514
OLD_FILES+=usr/share/terminfo/i/ibm8514-c
OLD_FILES+=usr/share/terminfo/i/ibm8604
OLD_FILES+=usr/share/terminfo/i/ibmaed
OLD_FILES+=usr/share/terminfo/i/ibmapa16
OLD_FILES+=usr/share/terminfo/i/ibmapa8
OLD_FILES+=usr/share/terminfo/i/ibmapa8c
OLD_FILES+=usr/share/terminfo/i/ibmapa8c-c
OLD_FILES+=usr/share/terminfo/i/ibmega
OLD_FILES+=usr/share/terminfo/i/ibmega-c
OLD_FILES+=usr/share/terminfo/i/ibmmono
OLD_FILES+=usr/share/terminfo/i/ibmmpel-c
OLD_FILES+=usr/share/terminfo/i/ibmpc
OLD_FILES+=usr/share/terminfo/i/ibmpc3
OLD_FILES+=usr/share/terminfo/i/ibmpc3r
OLD_FILES+=usr/share/terminfo/i/ibmpc3r-mono
OLD_FILES+=usr/share/terminfo/i/ibmpcx
OLD_FILES+=usr/share/terminfo/i/ibmvga
OLD_FILES+=usr/share/terminfo/i/ibmvga-c
OLD_FILES+=usr/share/terminfo/i/ibmx
OLD_FILES+=usr/share/terminfo/i/icl6402
OLD_FILES+=usr/share/terminfo/i/icl6404
OLD_FILES+=usr/share/terminfo/i/icl6404-w
OLD_FILES+=usr/share/terminfo/i/ifmr
OLD_FILES+=usr/share/terminfo/i/ims-ansi
OLD_FILES+=usr/share/terminfo/i/ims950
OLD_FILES+=usr/share/terminfo/i/ims950-b
OLD_FILES+=usr/share/terminfo/i/ims950-rv
OLD_FILES+=usr/share/terminfo/i/infoton
OLD_FILES+=usr/share/terminfo/i/interix
OLD_FILES+=usr/share/terminfo/i/interix-nti
OLD_FILES+=usr/share/terminfo/i/intertec
OLD_FILES+=usr/share/terminfo/i/intertube
OLD_FILES+=usr/share/terminfo/i/intertube2
OLD_FILES+=usr/share/terminfo/i/intext
OLD_FILES+=usr/share/terminfo/i/intext2
OLD_FILES+=usr/share/terminfo/i/intextii
OLD_FILES+=usr/share/terminfo/i/ips
OLD_FILES+=usr/share/terminfo/i/ipsi
OLD_FILES+=usr/share/terminfo/i/iq120
OLD_FILES+=usr/share/terminfo/i/iq140
OLD_FILES+=usr/share/terminfo/i/iris-ansi
OLD_FILES+=usr/share/terminfo/i/iris-ansi-ap
OLD_FILES+=usr/share/terminfo/i/iris-ansi-net
OLD_FILES+=usr/share/terminfo/i/iris-color
OLD_FILES+=usr/share/terminfo/i/iris40
OLD_FILES+=usr/share/terminfo/i/iterm
OLD_FILES+=usr/share/terminfo/i/iterm2
OLD_FILES+=usr/share/terminfo/i/iterm2-direct
OLD_DIRS+=usr/share/terminfo/i/
OLD_FILES+=usr/share/terminfo/j/jaixterm
OLD_FILES+=usr/share/terminfo/j/jaixterm-m
OLD_FILES+=usr/share/terminfo/j/jerq
OLD_FILES+=usr/share/terminfo/j/jfbterm
OLD_DIRS+=usr/share/terminfo/j/
OLD_FILES+=usr/share/terminfo/k/k45
OLD_FILES+=usr/share/terminfo/k/kaypro
OLD_FILES+=usr/share/terminfo/k/kaypro2
OLD_FILES+=usr/share/terminfo/k/kds6402
OLD_FILES+=usr/share/terminfo/k/kds7372
OLD_FILES+=usr/share/terminfo/k/kds7372-w
OLD_FILES+=usr/share/terminfo/k/kermit
OLD_FILES+=usr/share/terminfo/k/kermit-am
OLD_FILES+=usr/share/terminfo/k/kitty
OLD_FILES+=usr/share/terminfo/k/kitty+common
OLD_FILES+=usr/share/terminfo/k/kitty-direct
OLD_FILES+=usr/share/terminfo/k/klone+acs
OLD_FILES+=usr/share/terminfo/k/klone+color
OLD_FILES+=usr/share/terminfo/k/klone+koi8acs
OLD_FILES+=usr/share/terminfo/k/klone+sgr
OLD_FILES+=usr/share/terminfo/k/klone+sgr-dumb
OLD_FILES+=usr/share/terminfo/k/klone+sgr8
OLD_FILES+=usr/share/terminfo/k/kon
OLD_FILES+=usr/share/terminfo/k/kon2
OLD_FILES+=usr/share/terminfo/k/konsole
OLD_FILES+=usr/share/terminfo/k/konsole+pcfkeys
OLD_FILES+=usr/share/terminfo/k/konsole-16color
OLD_FILES+=usr/share/terminfo/k/konsole-256color
OLD_FILES+=usr/share/terminfo/k/konsole-base
OLD_FILES+=usr/share/terminfo/k/konsole-direct
OLD_FILES+=usr/share/terminfo/k/konsole-linux
OLD_FILES+=usr/share/terminfo/k/konsole-solaris
OLD_FILES+=usr/share/terminfo/k/konsole-vt100
OLD_FILES+=usr/share/terminfo/k/konsole-vt420pc
OLD_FILES+=usr/share/terminfo/k/konsole-xf3x
OLD_FILES+=usr/share/terminfo/k/konsole-xf4x
OLD_FILES+=usr/share/terminfo/k/kt7
OLD_FILES+=usr/share/terminfo/k/kt7ix
OLD_FILES+=usr/share/terminfo/k/kterm
OLD_FILES+=usr/share/terminfo/k/kterm-co
OLD_FILES+=usr/share/terminfo/k/kterm-color
OLD_FILES+=usr/share/terminfo/k/ktm
OLD_FILES+=usr/share/terminfo/k/kvt
OLD_DIRS+=usr/share/terminfo/k/
OLD_FILES+=usr/share/terminfo/l/la120
OLD_FILES+=usr/share/terminfo/l/layer
OLD_FILES+=usr/share/terminfo/l/lft
OLD_FILES+=usr/share/terminfo/l/lft-pc850
OLD_FILES+=usr/share/terminfo/l/linux
OLD_FILES+=usr/share/terminfo/l/linux+decid
OLD_FILES+=usr/share/terminfo/l/linux+sfkeys
OLD_FILES+=usr/share/terminfo/l/linux-16color
OLD_FILES+=usr/share/terminfo/l/linux-basic
OLD_FILES+=usr/share/terminfo/l/linux-c
OLD_FILES+=usr/share/terminfo/l/linux-c-nc
OLD_FILES+=usr/share/terminfo/l/linux-koi8
OLD_FILES+=usr/share/terminfo/l/linux-koi8r
OLD_FILES+=usr/share/terminfo/l/linux-lat
OLD_FILES+=usr/share/terminfo/l/linux-m
OLD_FILES+=usr/share/terminfo/l/linux-m1
OLD_FILES+=usr/share/terminfo/l/linux-m1b
OLD_FILES+=usr/share/terminfo/l/linux-m2
OLD_FILES+=usr/share/terminfo/l/linux-nic
OLD_FILES+=usr/share/terminfo/l/linux-s
OLD_FILES+=usr/share/terminfo/l/linux-vt
OLD_FILES+=usr/share/terminfo/l/linux2.2
OLD_FILES+=usr/share/terminfo/l/linux2.6
OLD_FILES+=usr/share/terminfo/l/linux2.6.26
OLD_FILES+=usr/share/terminfo/l/linux3.0
OLD_FILES+=usr/share/terminfo/l/lisa
OLD_FILES+=usr/share/terminfo/l/lisaterm
OLD_FILES+=usr/share/terminfo/l/lisaterm-w
OLD_FILES+=usr/share/terminfo/l/liswb
OLD_FILES+=usr/share/terminfo/l/ln03
OLD_FILES+=usr/share/terminfo/l/ln03-w
OLD_FILES+=usr/share/terminfo/l/lpr
OLD_FILES+=usr/share/terminfo/l/luna
OLD_FILES+=usr/share/terminfo/l/luna68k
OLD_DIRS+=usr/share/terminfo/l/
OLD_FILES+=usr/share/terminfo/m/m2-nam
OLD_FILES+=usr/share/terminfo/m/mac
OLD_FILES+=usr/share/terminfo/m/mac-w
OLD_FILES+=usr/share/terminfo/m/mach
OLD_FILES+=usr/share/terminfo/m/mach-bold
OLD_FILES+=usr/share/terminfo/m/mach-color
OLD_FILES+=usr/share/terminfo/m/mach-gnu
OLD_FILES+=usr/share/terminfo/m/mach-gnu-color
OLD_FILES+=usr/share/terminfo/m/macintosh
OLD_FILES+=usr/share/terminfo/m/macterminal-w
OLD_FILES+=usr/share/terminfo/m/mai
OLD_FILES+=usr/share/terminfo/m/masscomp
OLD_FILES+=usr/share/terminfo/m/masscomp1
OLD_FILES+=usr/share/terminfo/m/masscomp2
OLD_FILES+=usr/share/terminfo/m/mdl110
OLD_FILES+=usr/share/terminfo/m/megatek
OLD_FILES+=usr/share/terminfo/m/memhp
OLD_FILES+=usr/share/terminfo/m/mgr
OLD_FILES+=usr/share/terminfo/m/mgr-linux
OLD_FILES+=usr/share/terminfo/m/mgr-sun
OLD_FILES+=usr/share/terminfo/m/mgt
OLD_FILES+=usr/share/terminfo/m/mgterm
OLD_FILES+=usr/share/terminfo/m/microb
OLD_FILES+=usr/share/terminfo/m/microbee
OLD_FILES+=usr/share/terminfo/m/microterm
OLD_FILES+=usr/share/terminfo/m/microterm5
OLD_FILES+=usr/share/terminfo/m/mime
OLD_FILES+=usr/share/terminfo/m/mime-3ax
OLD_FILES+=usr/share/terminfo/m/mime-fb
OLD_FILES+=usr/share/terminfo/m/mime-hb
OLD_FILES+=usr/share/terminfo/m/mime1
OLD_FILES+=usr/share/terminfo/m/mime2
OLD_FILES+=usr/share/terminfo/m/mime2a
OLD_FILES+=usr/share/terminfo/m/mime2a-s
OLD_FILES+=usr/share/terminfo/m/mime2a-v
OLD_FILES+=usr/share/terminfo/m/mime314
OLD_FILES+=usr/share/terminfo/m/mime340
OLD_FILES+=usr/share/terminfo/m/mime3a
OLD_FILES+=usr/share/terminfo/m/mime3ax
OLD_FILES+=usr/share/terminfo/m/mimei
OLD_FILES+=usr/share/terminfo/m/mimeii
OLD_FILES+=usr/share/terminfo/m/minitel
OLD_FILES+=usr/share/terminfo/m/minitel-2
OLD_FILES+=usr/share/terminfo/m/minitel-2-nam
OLD_FILES+=usr/share/terminfo/m/minitel1
OLD_FILES+=usr/share/terminfo/m/minitel1-nb
OLD_FILES+=usr/share/terminfo/m/minitel12-80
OLD_FILES+=usr/share/terminfo/m/minitel1b
OLD_FILES+=usr/share/terminfo/m/minitel1b-80
OLD_FILES+=usr/share/terminfo/m/minitel1b-nb
OLD_FILES+=usr/share/terminfo/m/minitel2-80
OLD_FILES+=usr/share/terminfo/m/minix
OLD_FILES+=usr/share/terminfo/m/minix-1.5
OLD_FILES+=usr/share/terminfo/m/minix-1.7
OLD_FILES+=usr/share/terminfo/m/minix-3.0
OLD_FILES+=usr/share/terminfo/m/minix-old
OLD_FILES+=usr/share/terminfo/m/minix-old-am
OLD_FILES+=usr/share/terminfo/m/mintty
OLD_FILES+=usr/share/terminfo/m/mintty+common
OLD_FILES+=usr/share/terminfo/m/mintty-direct
OLD_FILES+=usr/share/terminfo/m/mlterm
OLD_FILES+=usr/share/terminfo/m/mlterm+pcfkeys
OLD_FILES+=usr/share/terminfo/m/mlterm-256color
OLD_FILES+=usr/share/terminfo/m/mlterm-direct
OLD_FILES+=usr/share/terminfo/m/mlterm2
OLD_FILES+=usr/share/terminfo/m/mlterm3
OLD_FILES+=usr/share/terminfo/m/mm314
OLD_FILES+=usr/share/terminfo/m/mm340
OLD_FILES+=usr/share/terminfo/m/mod
OLD_FILES+=usr/share/terminfo/m/mod24
OLD_FILES+=usr/share/terminfo/m/modgraph
OLD_FILES+=usr/share/terminfo/m/modgraph2
OLD_FILES+=usr/share/terminfo/m/modgraph48
OLD_FILES+=usr/share/terminfo/m/mono-emx
OLD_FILES+=usr/share/terminfo/m/morphos
OLD_FILES+=usr/share/terminfo/m/mouse-sun
OLD_FILES+=usr/share/terminfo/m/mrxvt
OLD_FILES+=usr/share/terminfo/m/mrxvt-256color
OLD_FILES+=usr/share/terminfo/m/ms-terminal
OLD_FILES+=usr/share/terminfo/m/ms-vt-utf8
OLD_FILES+=usr/share/terminfo/m/ms-vt100
OLD_FILES+=usr/share/terminfo/m/ms-vt100+
OLD_FILES+=usr/share/terminfo/m/ms-vt100-color
OLD_FILES+=usr/share/terminfo/m/msk227
OLD_FILES+=usr/share/terminfo/m/msk22714
OLD_FILES+=usr/share/terminfo/m/msk227am
OLD_FILES+=usr/share/terminfo/m/mskermit227
OLD_FILES+=usr/share/terminfo/m/mskermit22714
OLD_FILES+=usr/share/terminfo/m/mskermit227am
OLD_FILES+=usr/share/terminfo/m/mt-70
OLD_FILES+=usr/share/terminfo/m/mt4520-rv
OLD_FILES+=usr/share/terminfo/m/mt70
OLD_FILES+=usr/share/terminfo/m/mterm
OLD_FILES+=usr/share/terminfo/m/mterm-ansi
OLD_FILES+=usr/share/terminfo/m/mvterm
OLD_DIRS+=usr/share/terminfo/m/
OLD_FILES+=usr/share/terminfo/n/n7900
OLD_FILES+=usr/share/terminfo/n/nansi.sys
OLD_FILES+=usr/share/terminfo/n/nansi.sysk
OLD_FILES+=usr/share/terminfo/n/nansisys
OLD_FILES+=usr/share/terminfo/n/nansisysk
OLD_FILES+=usr/share/terminfo/n/ncr160vppp
OLD_FILES+=usr/share/terminfo/n/ncr160vpwpp
OLD_FILES+=usr/share/terminfo/n/ncr160vt100an
OLD_FILES+=usr/share/terminfo/n/ncr160vt100pp
OLD_FILES+=usr/share/terminfo/n/ncr160vt100wan
OLD_FILES+=usr/share/terminfo/n/ncr160vt100wpp
OLD_FILES+=usr/share/terminfo/n/ncr160vt200an
OLD_FILES+=usr/share/terminfo/n/ncr160vt200pp
OLD_FILES+=usr/share/terminfo/n/ncr160vt200wan
OLD_FILES+=usr/share/terminfo/n/ncr160vt200wpp
OLD_FILES+=usr/share/terminfo/n/ncr160vt300an
OLD_FILES+=usr/share/terminfo/n/ncr160vt300pp
OLD_FILES+=usr/share/terminfo/n/ncr160vt300wan
OLD_FILES+=usr/share/terminfo/n/ncr160vt300wpp
OLD_FILES+=usr/share/terminfo/n/ncr160wy50+pp
OLD_FILES+=usr/share/terminfo/n/ncr160wy50+wpp
OLD_FILES+=usr/share/terminfo/n/ncr160wy60pp
OLD_FILES+=usr/share/terminfo/n/ncr160wy60wpp
OLD_FILES+=usr/share/terminfo/n/ncr260intan
OLD_FILES+=usr/share/terminfo/n/ncr260intpp
OLD_FILES+=usr/share/terminfo/n/ncr260intwan
OLD_FILES+=usr/share/terminfo/n/ncr260intwpp
OLD_FILES+=usr/share/terminfo/n/ncr260vppp
OLD_FILES+=usr/share/terminfo/n/ncr260vpwpp
OLD_FILES+=usr/share/terminfo/n/ncr260vt100an
OLD_FILES+=usr/share/terminfo/n/ncr260vt100pp
OLD_FILES+=usr/share/terminfo/n/ncr260vt100wan
OLD_FILES+=usr/share/terminfo/n/ncr260vt100wpp
OLD_FILES+=usr/share/terminfo/n/ncr260vt200an
OLD_FILES+=usr/share/terminfo/n/ncr260vt200pp
OLD_FILES+=usr/share/terminfo/n/ncr260vt200wan
OLD_FILES+=usr/share/terminfo/n/ncr260vt200wpp
OLD_FILES+=usr/share/terminfo/n/ncr260vt300an
OLD_FILES+=usr/share/terminfo/n/ncr260vt300pp
OLD_FILES+=usr/share/terminfo/n/ncr260vt300wan
OLD_FILES+=usr/share/terminfo/n/ncr260vt300wpp
OLD_FILES+=usr/share/terminfo/n/ncr260wy325pp
OLD_FILES+=usr/share/terminfo/n/ncr260wy325wpp
OLD_FILES+=usr/share/terminfo/n/ncr260wy350pp
OLD_FILES+=usr/share/terminfo/n/ncr260wy350wpp
OLD_FILES+=usr/share/terminfo/n/ncr260wy50+pp
OLD_FILES+=usr/share/terminfo/n/ncr260wy50+wpp
OLD_FILES+=usr/share/terminfo/n/ncr260wy60pp
OLD_FILES+=usr/share/terminfo/n/ncr260wy60wpp
OLD_FILES+=usr/share/terminfo/n/ncr7900
OLD_FILES+=usr/share/terminfo/n/ncr7900i
OLD_FILES+=usr/share/terminfo/n/ncr7900iv
OLD_FILES+=usr/share/terminfo/n/ncr7901
OLD_FILES+=usr/share/terminfo/n/ncrvt100an
OLD_FILES+=usr/share/terminfo/n/ncrvt100pp
OLD_FILES+=usr/share/terminfo/n/ncrvt100wan
OLD_FILES+=usr/share/terminfo/n/ncrvt100wpp
OLD_FILES+=usr/share/terminfo/n/ncsa
OLD_FILES+=usr/share/terminfo/n/ncsa-m
OLD_FILES+=usr/share/terminfo/n/ncsa-m-ns
OLD_FILES+=usr/share/terminfo/n/ncsa-ns
OLD_FILES+=usr/share/terminfo/n/ncsa-vt220
OLD_FILES+=usr/share/terminfo/n/ncsa-vt220-8
OLD_FILES+=usr/share/terminfo/n/nd9500
OLD_FILES+=usr/share/terminfo/n/ndr9500
OLD_FILES+=usr/share/terminfo/n/ndr9500-25
OLD_FILES+=usr/share/terminfo/n/ndr9500-25-mc
OLD_FILES+=usr/share/terminfo/n/ndr9500-25-mc-nl
OLD_FILES+=usr/share/terminfo/n/ndr9500-25-nl
OLD_FILES+=usr/share/terminfo/n/ndr9500-mc
OLD_FILES+=usr/share/terminfo/n/ndr9500-mc-nl
OLD_FILES+=usr/share/terminfo/n/ndr9500-nl
OLD_FILES+=usr/share/terminfo/n/nec
OLD_FILES+=usr/share/terminfo/n/nec5520
OLD_FILES+=usr/share/terminfo/n/netbsd6
OLD_FILES+=usr/share/terminfo/n/newhp
OLD_FILES+=usr/share/terminfo/n/newhpkeyboard
OLD_FILES+=usr/share/terminfo/n/news
OLD_FILES+=usr/share/terminfo/n/news-29
OLD_FILES+=usr/share/terminfo/n/news-29-euc
OLD_FILES+=usr/share/terminfo/n/news-29-sjis
OLD_FILES+=usr/share/terminfo/n/news-33
OLD_FILES+=usr/share/terminfo/n/news-33-euc
OLD_FILES+=usr/share/terminfo/n/news-33-sjis
OLD_FILES+=usr/share/terminfo/n/news-42
OLD_FILES+=usr/share/terminfo/n/news-42-euc
OLD_FILES+=usr/share/terminfo/n/news-42-sjis
OLD_FILES+=usr/share/terminfo/n/news-a
OLD_FILES+=usr/share/terminfo/n/news-o
OLD_FILES+=usr/share/terminfo/n/news-old-unk
OLD_FILES+=usr/share/terminfo/n/news-unk
OLD_FILES+=usr/share/terminfo/n/news28
OLD_FILES+=usr/share/terminfo/n/news28-a
OLD_FILES+=usr/share/terminfo/n/news29
OLD_FILES+=usr/share/terminfo/n/news31
OLD_FILES+=usr/share/terminfo/n/news31-a
OLD_FILES+=usr/share/terminfo/n/news31-o
OLD_FILES+=usr/share/terminfo/n/news33
OLD_FILES+=usr/share/terminfo/n/news40
OLD_FILES+=usr/share/terminfo/n/news40-a
OLD_FILES+=usr/share/terminfo/n/news40-o
OLD_FILES+=usr/share/terminfo/n/news42
OLD_FILES+=usr/share/terminfo/n/newscbm
OLD_FILES+=usr/share/terminfo/n/newscbm-a
OLD_FILES+=usr/share/terminfo/n/newscbm-o
OLD_FILES+=usr/share/terminfo/n/newscbm33
OLD_FILES+=usr/share/terminfo/n/next
OLD_FILES+=usr/share/terminfo/n/nextshell
OLD_FILES+=usr/share/terminfo/n/northstar
OLD_FILES+=usr/share/terminfo/n/nsterm
OLD_FILES+=usr/share/terminfo/n/nsterm+7
OLD_FILES+=usr/share/terminfo/n/nsterm+acs
OLD_FILES+=usr/share/terminfo/n/nsterm+c
OLD_FILES+=usr/share/terminfo/n/nsterm+c41
OLD_FILES+=usr/share/terminfo/n/nsterm+mac
OLD_FILES+=usr/share/terminfo/n/nsterm+s
OLD_FILES+=usr/share/terminfo/n/nsterm-16color
OLD_FILES+=usr/share/terminfo/n/nsterm-256color
OLD_FILES+=usr/share/terminfo/n/nsterm-7
OLD_FILES+=usr/share/terminfo/n/nsterm-7-c
OLD_FILES+=usr/share/terminfo/n/nsterm-7-c-s
OLD_FILES+=usr/share/terminfo/n/nsterm-7-m
OLD_FILES+=usr/share/terminfo/n/nsterm-7-m-s
OLD_FILES+=usr/share/terminfo/n/nsterm-7-s
OLD_FILES+=usr/share/terminfo/n/nsterm-acs
OLD_FILES+=usr/share/terminfo/n/nsterm-acs-c
OLD_FILES+=usr/share/terminfo/n/nsterm-acs-c-s
OLD_FILES+=usr/share/terminfo/n/nsterm-acs-m
OLD_FILES+=usr/share/terminfo/n/nsterm-acs-m-s
OLD_FILES+=usr/share/terminfo/n/nsterm-acs-s
OLD_FILES+=usr/share/terminfo/n/nsterm-bce
OLD_FILES+=usr/share/terminfo/n/nsterm-build309
OLD_FILES+=usr/share/terminfo/n/nsterm-build326
OLD_FILES+=usr/share/terminfo/n/nsterm-build343
OLD_FILES+=usr/share/terminfo/n/nsterm-build361
OLD_FILES+=usr/share/terminfo/n/nsterm-build400
OLD_FILES+=usr/share/terminfo/n/nsterm-c
OLD_FILES+=usr/share/terminfo/n/nsterm-c-7
OLD_FILES+=usr/share/terminfo/n/nsterm-c-acs
OLD_FILES+=usr/share/terminfo/n/nsterm-c-s
OLD_FILES+=usr/share/terminfo/n/nsterm-c-s-7
OLD_FILES+=usr/share/terminfo/n/nsterm-c-s-acs
OLD_FILES+=usr/share/terminfo/n/nsterm-direct
OLD_FILES+=usr/share/terminfo/n/nsterm-m
OLD_FILES+=usr/share/terminfo/n/nsterm-m-7
OLD_FILES+=usr/share/terminfo/n/nsterm-m-acs
OLD_FILES+=usr/share/terminfo/n/nsterm-m-s
OLD_FILES+=usr/share/terminfo/n/nsterm-m-s-7
OLD_FILES+=usr/share/terminfo/n/nsterm-m-s-acs
OLD_FILES+=usr/share/terminfo/n/nsterm-old
OLD_FILES+=usr/share/terminfo/n/nsterm-s
OLD_FILES+=usr/share/terminfo/n/nsterm-s-7
OLD_FILES+=usr/share/terminfo/n/nsterm-s-acs
OLD_FILES+=usr/share/terminfo/n/ntconsole
OLD_FILES+=usr/share/terminfo/n/ntconsole-100
OLD_FILES+=usr/share/terminfo/n/ntconsole-100-nti
OLD_FILES+=usr/share/terminfo/n/ntconsole-25
OLD_FILES+=usr/share/terminfo/n/ntconsole-25-nti
OLD_FILES+=usr/share/terminfo/n/ntconsole-25-w
OLD_FILES+=usr/share/terminfo/n/ntconsole-25-w-vt
OLD_FILES+=usr/share/terminfo/n/ntconsole-35
OLD_FILES+=usr/share/terminfo/n/ntconsole-35-nti
OLD_FILES+=usr/share/terminfo/n/ntconsole-35-w
OLD_FILES+=usr/share/terminfo/n/ntconsole-50
OLD_FILES+=usr/share/terminfo/n/ntconsole-50-nti
OLD_FILES+=usr/share/terminfo/n/ntconsole-50-w
OLD_FILES+=usr/share/terminfo/n/ntconsole-60
OLD_FILES+=usr/share/terminfo/n/ntconsole-60-nti
OLD_FILES+=usr/share/terminfo/n/ntconsole-60-w
OLD_FILES+=usr/share/terminfo/n/ntconsole-w
OLD_FILES+=usr/share/terminfo/n/ntconsole-w-vt
OLD_FILES+=usr/share/terminfo/n/nwe501
OLD_FILES+=usr/share/terminfo/n/nwe501-a
OLD_FILES+=usr/share/terminfo/n/nwe501-o
OLD_FILES+=usr/share/terminfo/n/nwp-511
OLD_FILES+=usr/share/terminfo/n/nwp-517
OLD_FILES+=usr/share/terminfo/n/nwp-517-w
OLD_FILES+=usr/share/terminfo/n/nwp251-a
OLD_FILES+=usr/share/terminfo/n/nwp251-o
OLD_FILES+=usr/share/terminfo/n/nwp511
OLD_FILES+=usr/share/terminfo/n/nwp512
OLD_FILES+=usr/share/terminfo/n/nwp512-a
OLD_FILES+=usr/share/terminfo/n/nwp512-o
OLD_FILES+=usr/share/terminfo/n/nwp513
OLD_FILES+=usr/share/terminfo/n/nwp513-a
OLD_FILES+=usr/share/terminfo/n/nwp513-o
OLD_FILES+=usr/share/terminfo/n/nwp514
OLD_FILES+=usr/share/terminfo/n/nwp514-a
OLD_FILES+=usr/share/terminfo/n/nwp514-o
OLD_FILES+=usr/share/terminfo/n/nwp517
OLD_FILES+=usr/share/terminfo/n/nwp517-w
OLD_FILES+=usr/share/terminfo/n/nwp518
OLD_FILES+=usr/share/terminfo/n/nwp518-a
OLD_FILES+=usr/share/terminfo/n/nwp518-o
OLD_FILES+=usr/share/terminfo/n/nxterm
OLD_DIRS+=usr/share/terminfo/n/
OLD_FILES+=usr/share/terminfo/o/o31
OLD_FILES+=usr/share/terminfo/o/o4112-nd
OLD_FILES+=usr/share/terminfo/o/o85h
OLD_FILES+=usr/share/terminfo/o/oabm85h
OLD_FILES+=usr/share/terminfo/o/oblit
OLD_FILES+=usr/share/terminfo/o/oc100
OLD_FILES+=usr/share/terminfo/o/oconcept
OLD_FILES+=usr/share/terminfo/o/ofcons
OLD_FILES+=usr/share/terminfo/o/ojerq
OLD_FILES+=usr/share/terminfo/o/old-st
OLD_FILES+=usr/share/terminfo/o/oldibmpc3
OLD_FILES+=usr/share/terminfo/o/oldpc3
OLD_FILES+=usr/share/terminfo/o/oldsun
OLD_FILES+=usr/share/terminfo/o/omron
OLD_FILES+=usr/share/terminfo/o/opennt
OLD_FILES+=usr/share/terminfo/o/opennt-100
OLD_FILES+=usr/share/terminfo/o/opennt-100-nti
OLD_FILES+=usr/share/terminfo/o/opennt-25
OLD_FILES+=usr/share/terminfo/o/opennt-25-nti
OLD_FILES+=usr/share/terminfo/o/opennt-25-w
OLD_FILES+=usr/share/terminfo/o/opennt-25-w-vt
OLD_FILES+=usr/share/terminfo/o/opennt-35
OLD_FILES+=usr/share/terminfo/o/opennt-35-nti
OLD_FILES+=usr/share/terminfo/o/opennt-35-w
OLD_FILES+=usr/share/terminfo/o/opennt-50
OLD_FILES+=usr/share/terminfo/o/opennt-50-nti
OLD_FILES+=usr/share/terminfo/o/opennt-50-w
OLD_FILES+=usr/share/terminfo/o/opennt-60
OLD_FILES+=usr/share/terminfo/o/opennt-60-nti
OLD_FILES+=usr/share/terminfo/o/opennt-60-w
OLD_FILES+=usr/share/terminfo/o/opennt-nti
OLD_FILES+=usr/share/terminfo/o/opennt-w
OLD_FILES+=usr/share/terminfo/o/opennt-w-vt
OLD_FILES+=usr/share/terminfo/o/opus3n1+
OLD_FILES+=usr/share/terminfo/o/origibmpc3
OLD_FILES+=usr/share/terminfo/o/origpc3
OLD_FILES+=usr/share/terminfo/o/os9LII
OLD_FILES+=usr/share/terminfo/o/osborne
OLD_FILES+=usr/share/terminfo/o/osborne-w
OLD_FILES+=usr/share/terminfo/o/osborne1
OLD_FILES+=usr/share/terminfo/o/osborne1-w
OLD_FILES+=usr/share/terminfo/o/osexec
OLD_FILES+=usr/share/terminfo/o/otek4112
OLD_FILES+=usr/share/terminfo/o/otek4113
OLD_FILES+=usr/share/terminfo/o/otek4114
OLD_FILES+=usr/share/terminfo/o/otek4115
OLD_FILES+=usr/share/terminfo/o/owl
OLD_DIRS+=usr/share/terminfo/o/
OLD_FILES+=usr/share/terminfo/p/p12
OLD_FILES+=usr/share/terminfo/p/p12-m
OLD_FILES+=usr/share/terminfo/p/p12-m-w
OLD_FILES+=usr/share/terminfo/p/p12-w
OLD_FILES+=usr/share/terminfo/p/p14
OLD_FILES+=usr/share/terminfo/p/p14-m
OLD_FILES+=usr/share/terminfo/p/p14-m-w
OLD_FILES+=usr/share/terminfo/p/p14-w
OLD_FILES+=usr/share/terminfo/p/p19
OLD_FILES+=usr/share/terminfo/p/p4
OLD_FILES+=usr/share/terminfo/p/p5
OLD_FILES+=usr/share/terminfo/p/p7
OLD_FILES+=usr/share/terminfo/p/p8
OLD_FILES+=usr/share/terminfo/p/p8-w
OLD_FILES+=usr/share/terminfo/p/p8gl
OLD_FILES+=usr/share/terminfo/p/p9
OLD_FILES+=usr/share/terminfo/p/p9-8
OLD_FILES+=usr/share/terminfo/p/p9-8-w
OLD_FILES+=usr/share/terminfo/p/p9-w
OLD_FILES+=usr/share/terminfo/p/pc-coherent
OLD_FILES+=usr/share/terminfo/p/pc-minix
OLD_FILES+=usr/share/terminfo/p/pc-venix
OLD_FILES+=usr/share/terminfo/p/pc3
OLD_FILES+=usr/share/terminfo/p/pc3-bold
OLD_FILES+=usr/share/terminfo/p/pc3r
OLD_FILES+=usr/share/terminfo/p/pc3r-m
OLD_FILES+=usr/share/terminfo/p/pc6300plus
OLD_FILES+=usr/share/terminfo/p/pc7300
OLD_FILES+=usr/share/terminfo/p/pcansi
OLD_FILES+=usr/share/terminfo/p/pcansi-25
OLD_FILES+=usr/share/terminfo/p/pcansi-25-m
OLD_FILES+=usr/share/terminfo/p/pcansi-33
OLD_FILES+=usr/share/terminfo/p/pcansi-33-m
OLD_FILES+=usr/share/terminfo/p/pcansi-43
OLD_FILES+=usr/share/terminfo/p/pcansi-43-m
OLD_FILES+=usr/share/terminfo/p/pcansi-m
OLD_FILES+=usr/share/terminfo/p/pcansi-mono
OLD_FILES+=usr/share/terminfo/p/pcansi25
OLD_FILES+=usr/share/terminfo/p/pcansi25m
OLD_FILES+=usr/share/terminfo/p/pcansi33
OLD_FILES+=usr/share/terminfo/p/pcansi33m
OLD_FILES+=usr/share/terminfo/p/pcansi43
OLD_FILES+=usr/share/terminfo/p/pccon
OLD_FILES+=usr/share/terminfo/p/pccon+base
OLD_FILES+=usr/share/terminfo/p/pccon+colors
OLD_FILES+=usr/share/terminfo/p/pccon+keys
OLD_FILES+=usr/share/terminfo/p/pccon+sgr+acs
OLD_FILES+=usr/share/terminfo/p/pccon+sgr+acs0
OLD_FILES+=usr/share/terminfo/p/pccon-m
OLD_FILES+=usr/share/terminfo/p/pccon0
OLD_FILES+=usr/share/terminfo/p/pccon0-m
OLD_FILES+=usr/share/terminfo/p/pccons
OLD_FILES+=usr/share/terminfo/p/pcconsole
OLD_FILES+=usr/share/terminfo/p/pcix
OLD_FILES+=usr/share/terminfo/p/pckermit
OLD_FILES+=usr/share/terminfo/p/pckermit12
OLD_FILES+=usr/share/terminfo/p/pckermit120
OLD_FILES+=usr/share/terminfo/p/pcmw
OLD_FILES+=usr/share/terminfo/p/pcplot
OLD_FILES+=usr/share/terminfo/p/pcvt25
OLD_FILES+=usr/share/terminfo/p/pcvt25-color
OLD_FILES+=usr/share/terminfo/p/pcvt25w
OLD_FILES+=usr/share/terminfo/p/pcvt28
OLD_FILES+=usr/share/terminfo/p/pcvt28w
OLD_FILES+=usr/share/terminfo/p/pcvt35
OLD_FILES+=usr/share/terminfo/p/pcvt35w
OLD_FILES+=usr/share/terminfo/p/pcvt40
OLD_FILES+=usr/share/terminfo/p/pcvt40w
OLD_FILES+=usr/share/terminfo/p/pcvt43
OLD_FILES+=usr/share/terminfo/p/pcvt43w
OLD_FILES+=usr/share/terminfo/p/pcvt50
OLD_FILES+=usr/share/terminfo/p/pcvt50w
OLD_FILES+=usr/share/terminfo/p/pcvtXX
OLD_FILES+=usr/share/terminfo/p/pcz19
OLD_FILES+=usr/share/terminfo/p/pe1100
OLD_FILES+=usr/share/terminfo/p/pe1200
OLD_FILES+=usr/share/terminfo/p/pe1251
OLD_FILES+=usr/share/terminfo/p/pe550
OLD_FILES+=usr/share/terminfo/p/pe6100
OLD_FILES+=usr/share/terminfo/p/pe6300
OLD_FILES+=usr/share/terminfo/p/pe6312
OLD_FILES+=usr/share/terminfo/p/pe7000c
OLD_FILES+=usr/share/terminfo/p/pe7000m
OLD_FILES+=usr/share/terminfo/p/pilot
OLD_FILES+=usr/share/terminfo/p/pmcons
OLD_FILES+=usr/share/terminfo/p/pmconsole
OLD_FILES+=usr/share/terminfo/p/printer
OLD_FILES+=usr/share/terminfo/p/prism12
OLD_FILES+=usr/share/terminfo/p/prism12-m
OLD_FILES+=usr/share/terminfo/p/prism12-m-w
OLD_FILES+=usr/share/terminfo/p/prism12-w
OLD_FILES+=usr/share/terminfo/p/prism14
OLD_FILES+=usr/share/terminfo/p/prism14-m
OLD_FILES+=usr/share/terminfo/p/prism14-m-w
OLD_FILES+=usr/share/terminfo/p/prism14-w
OLD_FILES+=usr/share/terminfo/p/prism2
OLD_FILES+=usr/share/terminfo/p/prism4
OLD_FILES+=usr/share/terminfo/p/prism5
OLD_FILES+=usr/share/terminfo/p/prism7
OLD_FILES+=usr/share/terminfo/p/prism8
OLD_FILES+=usr/share/terminfo/p/prism8-w
OLD_FILES+=usr/share/terminfo/p/prism8gl
OLD_FILES+=usr/share/terminfo/p/prism9
OLD_FILES+=usr/share/terminfo/p/prism9-8
OLD_FILES+=usr/share/terminfo/p/prism9-8-w
OLD_FILES+=usr/share/terminfo/p/prism9-w
OLD_FILES+=usr/share/terminfo/p/pro350
OLD_FILES+=usr/share/terminfo/p/ps300
OLD_FILES+=usr/share/terminfo/p/psterm
OLD_FILES+=usr/share/terminfo/p/psterm-80x24
OLD_FILES+=usr/share/terminfo/p/psterm-90x28
OLD_FILES+=usr/share/terminfo/p/psterm-96x48
OLD_FILES+=usr/share/terminfo/p/psterm-basic
OLD_FILES+=usr/share/terminfo/p/psterm-fast
OLD_FILES+=usr/share/terminfo/p/psx_ansi
OLD_FILES+=usr/share/terminfo/p/pt100
OLD_FILES+=usr/share/terminfo/p/pt100w
OLD_FILES+=usr/share/terminfo/p/pt200
OLD_FILES+=usr/share/terminfo/p/pt200w
OLD_FILES+=usr/share/terminfo/p/pt210
OLD_FILES+=usr/share/terminfo/p/pt250
OLD_FILES+=usr/share/terminfo/p/pt250w
OLD_FILES+=usr/share/terminfo/p/pt505
OLD_FILES+=usr/share/terminfo/p/pt505-22
OLD_FILES+=usr/share/terminfo/p/pt505-24
OLD_FILES+=usr/share/terminfo/p/pty
OLD_FILES+=usr/share/terminfo/p/putty
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys+esc
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys+linux
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys+sco
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys+vt100
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys+vt400
OLD_FILES+=usr/share/terminfo/p/putty+fnkeys+xterm
OLD_FILES+=usr/share/terminfo/p/putty+keypad
OLD_FILES+=usr/share/terminfo/p/putty+screen
OLD_FILES+=usr/share/terminfo/p/putty-256color
OLD_FILES+=usr/share/terminfo/p/putty-m1
OLD_FILES+=usr/share/terminfo/p/putty-m1b
OLD_FILES+=usr/share/terminfo/p/putty-m2
OLD_FILES+=usr/share/terminfo/p/putty-noapp
OLD_FILES+=usr/share/terminfo/p/putty-sco
OLD_FILES+=usr/share/terminfo/p/putty-screen
OLD_FILES+=usr/share/terminfo/p/putty-vt100
OLD_DIRS+=usr/share/terminfo/p/
OLD_FILES+=usr/share/terminfo/q/qansi
OLD_FILES+=usr/share/terminfo/q/qansi-g
OLD_FILES+=usr/share/terminfo/q/qansi-m
OLD_FILES+=usr/share/terminfo/q/qansi-t
OLD_FILES+=usr/share/terminfo/q/qansi-w
OLD_FILES+=usr/share/terminfo/q/qdcons
OLD_FILES+=usr/share/terminfo/q/qdss
OLD_FILES+=usr/share/terminfo/q/qnx
OLD_FILES+=usr/share/terminfo/q/qnx4
OLD_FILES+=usr/share/terminfo/q/qnxm
OLD_FILES+=usr/share/terminfo/q/qnxt
OLD_FILES+=usr/share/terminfo/q/qnxt2
OLD_FILES+=usr/share/terminfo/q/qnxt4
OLD_FILES+=usr/share/terminfo/q/qnxtmono
OLD_FILES+=usr/share/terminfo/q/qnxw
OLD_FILES+=usr/share/terminfo/q/qume
OLD_FILES+=usr/share/terminfo/q/qume5
OLD_FILES+=usr/share/terminfo/q/qvt101
OLD_FILES+=usr/share/terminfo/q/qvt101+
OLD_FILES+=usr/share/terminfo/q/qvt101p
OLD_FILES+=usr/share/terminfo/q/qvt102
OLD_FILES+=usr/share/terminfo/q/qvt103
OLD_FILES+=usr/share/terminfo/q/qvt103-w
OLD_FILES+=usr/share/terminfo/q/qvt108
OLD_FILES+=usr/share/terminfo/q/qvt119
OLD_FILES+=usr/share/terminfo/q/qvt119+
OLD_FILES+=usr/share/terminfo/q/qvt119+-25
OLD_FILES+=usr/share/terminfo/q/qvt119+-25-w
OLD_FILES+=usr/share/terminfo/q/qvt119+-w
OLD_FILES+=usr/share/terminfo/q/qvt119-25-w
OLD_FILES+=usr/share/terminfo/q/qvt119-w
OLD_FILES+=usr/share/terminfo/q/qvt119p
OLD_FILES+=usr/share/terminfo/q/qvt119p-25
OLD_FILES+=usr/share/terminfo/q/qvt119p-25-w
OLD_FILES+=usr/share/terminfo/q/qvt119p-w
OLD_FILES+=usr/share/terminfo/q/qvt203
OLD_FILES+=usr/share/terminfo/q/qvt203+
OLD_FILES+=usr/share/terminfo/q/qvt203-25
OLD_FILES+=usr/share/terminfo/q/qvt203-25-w
OLD_FILES+=usr/share/terminfo/q/qvt203-w
OLD_FILES+=usr/share/terminfo/q/qvt203-w-am
OLD_DIRS+=usr/share/terminfo/q/
OLD_FILES+=usr/share/terminfo/r/rbcomm
OLD_FILES+=usr/share/terminfo/r/rbcomm-nam
OLD_FILES+=usr/share/terminfo/r/rbcomm-w
OLD_FILES+=usr/share/terminfo/r/rca
OLD_FILES+=usr/share/terminfo/r/rcons
OLD_FILES+=usr/share/terminfo/r/rcons-color
OLD_FILES+=usr/share/terminfo/r/rebus3180
OLD_FILES+=usr/share/terminfo/r/regent
OLD_FILES+=usr/share/terminfo/r/regent100
OLD_FILES+=usr/share/terminfo/r/regent20
OLD_FILES+=usr/share/terminfo/r/regent200
OLD_FILES+=usr/share/terminfo/r/regent25
OLD_FILES+=usr/share/terminfo/r/regent40
OLD_FILES+=usr/share/terminfo/r/regent40+
OLD_FILES+=usr/share/terminfo/r/regent60
OLD_FILES+=usr/share/terminfo/r/rt6221
OLD_FILES+=usr/share/terminfo/r/rt6221-w
OLD_FILES+=usr/share/terminfo/r/rtpc
OLD_FILES+=usr/share/terminfo/r/rxvt
OLD_FILES+=usr/share/terminfo/r/rxvt+pcfkeys
OLD_FILES+=usr/share/terminfo/r/rxvt-16color
OLD_FILES+=usr/share/terminfo/r/rxvt-256color
OLD_FILES+=usr/share/terminfo/r/rxvt-88color
OLD_FILES+=usr/share/terminfo/r/rxvt-basic
OLD_FILES+=usr/share/terminfo/r/rxvt-color
OLD_FILES+=usr/share/terminfo/r/rxvt-cygwin
OLD_FILES+=usr/share/terminfo/r/rxvt-cygwin-native
OLD_FILES+=usr/share/terminfo/r/rxvt-xpm
OLD_DIRS+=usr/share/terminfo/r/
OLD_FILES+=usr/share/terminfo/s/s4
OLD_FILES+=usr/share/terminfo/s/sb1
OLD_FILES+=usr/share/terminfo/s/sb2
OLD_FILES+=usr/share/terminfo/s/sb3
OLD_FILES+=usr/share/terminfo/s/sbi
OLD_FILES+=usr/share/terminfo/s/sbobcat
OLD_FILES+=usr/share/terminfo/s/sc410
OLD_FILES+=usr/share/terminfo/s/sc415
OLD_FILES+=usr/share/terminfo/s/scanset
OLD_FILES+=usr/share/terminfo/s/scoansi
OLD_FILES+=usr/share/terminfo/s/scoansi-new
OLD_FILES+=usr/share/terminfo/s/scoansi-old
OLD_FILES+=usr/share/terminfo/s/screen
OLD_FILES+=usr/share/terminfo/s/screen+fkeys
OLD_FILES+=usr/share/terminfo/s/screen+italics
OLD_FILES+=usr/share/terminfo/s/screen-16color
OLD_FILES+=usr/share/terminfo/s/screen-16color-bce
OLD_FILES+=usr/share/terminfo/s/screen-16color-bce-s
OLD_FILES+=usr/share/terminfo/s/screen-16color-s
OLD_FILES+=usr/share/terminfo/s/screen-256color
OLD_FILES+=usr/share/terminfo/s/screen-256color-bce
OLD_FILES+=usr/share/terminfo/s/screen-256color-bce-s
OLD_FILES+=usr/share/terminfo/s/screen-256color-s
OLD_FILES+=usr/share/terminfo/s/screen-bce
OLD_FILES+=usr/share/terminfo/s/screen-bce.Eterm
OLD_FILES+=usr/share/terminfo/s/screen-bce.gnome
OLD_FILES+=usr/share/terminfo/s/screen-bce.konsole
OLD_FILES+=usr/share/terminfo/s/screen-bce.linux
OLD_FILES+=usr/share/terminfo/s/screen-bce.mrxvt
OLD_FILES+=usr/share/terminfo/s/screen-bce.rxvt
OLD_FILES+=usr/share/terminfo/s/screen-bce.xterm-new
OLD_FILES+=usr/share/terminfo/s/screen-s
OLD_FILES+=usr/share/terminfo/s/screen-w
OLD_FILES+=usr/share/terminfo/s/screen.Eterm
OLD_FILES+=usr/share/terminfo/s/screen.gnome
OLD_FILES+=usr/share/terminfo/s/screen.konsole
OLD_FILES+=usr/share/terminfo/s/screen.konsole-256color
OLD_FILES+=usr/share/terminfo/s/screen.linux
OLD_FILES+=usr/share/terminfo/s/screen.linux-m1
OLD_FILES+=usr/share/terminfo/s/screen.linux-m1b
OLD_FILES+=usr/share/terminfo/s/screen.linux-m2
OLD_FILES+=usr/share/terminfo/s/screen.linux-s
OLD_FILES+=usr/share/terminfo/s/screen.minitel1
OLD_FILES+=usr/share/terminfo/s/screen.minitel1-nb
OLD_FILES+=usr/share/terminfo/s/screen.minitel12-80
OLD_FILES+=usr/share/terminfo/s/screen.minitel1b
OLD_FILES+=usr/share/terminfo/s/screen.minitel1b-80
OLD_FILES+=usr/share/terminfo/s/screen.minitel1b-nb
OLD_FILES+=usr/share/terminfo/s/screen.minitel2-80
OLD_FILES+=usr/share/terminfo/s/screen.mlterm
OLD_FILES+=usr/share/terminfo/s/screen.mlterm-256color
OLD_FILES+=usr/share/terminfo/s/screen.mrxvt
OLD_FILES+=usr/share/terminfo/s/screen.putty
OLD_FILES+=usr/share/terminfo/s/screen.putty-256color
OLD_FILES+=usr/share/terminfo/s/screen.putty-m1
OLD_FILES+=usr/share/terminfo/s/screen.putty-m1b
OLD_FILES+=usr/share/terminfo/s/screen.putty-m2
OLD_FILES+=usr/share/terminfo/s/screen.rxvt
OLD_FILES+=usr/share/terminfo/s/screen.teraterm
OLD_FILES+=usr/share/terminfo/s/screen.vte
OLD_FILES+=usr/share/terminfo/s/screen.vte-256color
OLD_FILES+=usr/share/terminfo/s/screen.xterm-256color
OLD_FILES+=usr/share/terminfo/s/screen.xterm-new
OLD_FILES+=usr/share/terminfo/s/screen.xterm-r6
OLD_FILES+=usr/share/terminfo/s/screen.xterm-xfree86
OLD_FILES+=usr/share/terminfo/s/screen2
OLD_FILES+=usr/share/terminfo/s/screen3
OLD_FILES+=usr/share/terminfo/s/screen4
OLD_FILES+=usr/share/terminfo/s/screen5
OLD_FILES+=usr/share/terminfo/s/screwpoint
OLD_FILES+=usr/share/terminfo/s/scrhp
OLD_FILES+=usr/share/terminfo/s/scrt
OLD_FILES+=usr/share/terminfo/s/securecrt
OLD_FILES+=usr/share/terminfo/s/sibo
OLD_FILES+=usr/share/terminfo/s/simpleterm
OLD_FILES+=usr/share/terminfo/s/simterm
OLD_FILES+=usr/share/terminfo/s/soroc
OLD_FILES+=usr/share/terminfo/s/soroc120
OLD_FILES+=usr/share/terminfo/s/soroc140
OLD_FILES+=usr/share/terminfo/s/spinwriter
OLD_FILES+=usr/share/terminfo/s/st
OLD_FILES+=usr/share/terminfo/s/st-0.6
OLD_FILES+=usr/share/terminfo/s/st-0.7
OLD_FILES+=usr/share/terminfo/s/st-0.8
OLD_FILES+=usr/share/terminfo/s/st-16color
OLD_FILES+=usr/share/terminfo/s/st-256color
OLD_FILES+=usr/share/terminfo/s/st-direct
OLD_FILES+=usr/share/terminfo/s/st52
OLD_FILES+=usr/share/terminfo/s/st52-color
OLD_FILES+=usr/share/terminfo/s/st52-m
OLD_FILES+=usr/share/terminfo/s/st52-old
OLD_FILES+=usr/share/terminfo/s/stterm
OLD_FILES+=usr/share/terminfo/s/stterm-16color
OLD_FILES+=usr/share/terminfo/s/stterm-256color
OLD_FILES+=usr/share/terminfo/s/stv52
OLD_FILES+=usr/share/terminfo/s/stv52pc
OLD_FILES+=usr/share/terminfo/s/sun
OLD_FILES+=usr/share/terminfo/s/sun+sl
OLD_FILES+=usr/share/terminfo/s/sun-1
OLD_FILES+=usr/share/terminfo/s/sun-12
OLD_FILES+=usr/share/terminfo/s/sun-17
OLD_FILES+=usr/share/terminfo/s/sun-24
OLD_FILES+=usr/share/terminfo/s/sun-34
OLD_FILES+=usr/share/terminfo/s/sun-48
OLD_FILES+=usr/share/terminfo/s/sun-c
OLD_FILES+=usr/share/terminfo/s/sun-cgsix
OLD_FILES+=usr/share/terminfo/s/sun-cmd
OLD_FILES+=usr/share/terminfo/s/sun-color
OLD_FILES+=usr/share/terminfo/s/sun-e
OLD_FILES+=usr/share/terminfo/s/sun-e-s
OLD_FILES+=usr/share/terminfo/s/sun-il
OLD_FILES+=usr/share/terminfo/s/sun-nic
OLD_FILES+=usr/share/terminfo/s/sun-s
OLD_FILES+=usr/share/terminfo/s/sun-s-e
OLD_FILES+=usr/share/terminfo/s/sun-ss5
OLD_FILES+=usr/share/terminfo/s/sun-type4
OLD_FILES+=usr/share/terminfo/s/sun1
OLD_FILES+=usr/share/terminfo/s/sun2
OLD_FILES+=usr/share/terminfo/s/sune
OLD_FILES+=usr/share/terminfo/s/superbee
OLD_FILES+=usr/share/terminfo/s/superbee-xsb
OLD_FILES+=usr/share/terminfo/s/superbeeic
OLD_FILES+=usr/share/terminfo/s/superbrain
OLD_FILES+=usr/share/terminfo/s/sv80
OLD_FILES+=usr/share/terminfo/s/swtp
OLD_FILES+=usr/share/terminfo/s/synertek
OLD_FILES+=usr/share/terminfo/s/synertek380
OLD_FILES+=usr/share/terminfo/s/system1
OLD_DIRS+=usr/share/terminfo/s/
OLD_FILES+=usr/share/terminfo/t/t10
OLD_FILES+=usr/share/terminfo/t/t1061
OLD_FILES+=usr/share/terminfo/t/t1061f
OLD_FILES+=usr/share/terminfo/t/t16
OLD_FILES+=usr/share/terminfo/t/t3700
OLD_FILES+=usr/share/terminfo/t/t3800
OLD_FILES+=usr/share/terminfo/t/t653x
OLD_FILES+=usr/share/terminfo/t/tab
OLD_FILES+=usr/share/terminfo/t/tab132
OLD_FILES+=usr/share/terminfo/t/tab132-15
OLD_FILES+=usr/share/terminfo/t/tab132-rv
OLD_FILES+=usr/share/terminfo/t/tab132-w
OLD_FILES+=usr/share/terminfo/t/tab132-w-rv
OLD_FILES+=usr/share/terminfo/t/tandem6510
OLD_FILES+=usr/share/terminfo/t/tandem653
OLD_FILES+=usr/share/terminfo/t/tek
OLD_FILES+=usr/share/terminfo/t/tek4012
OLD_FILES+=usr/share/terminfo/t/tek4013
OLD_FILES+=usr/share/terminfo/t/tek4014
OLD_FILES+=usr/share/terminfo/t/tek4014-sm
OLD_FILES+=usr/share/terminfo/t/tek4015
OLD_FILES+=usr/share/terminfo/t/tek4015-sm
OLD_FILES+=usr/share/terminfo/t/tek4023
OLD_FILES+=usr/share/terminfo/t/tek4024
OLD_FILES+=usr/share/terminfo/t/tek4025
OLD_FILES+=usr/share/terminfo/t/tek4025-17
OLD_FILES+=usr/share/terminfo/t/tek4025-17-ws
OLD_FILES+=usr/share/terminfo/t/tek4025-cr
OLD_FILES+=usr/share/terminfo/t/tek4025-ex
OLD_FILES+=usr/share/terminfo/t/tek4025a
OLD_FILES+=usr/share/terminfo/t/tek4025ex
OLD_FILES+=usr/share/terminfo/t/tek4027
OLD_FILES+=usr/share/terminfo/t/tek4027-ex
OLD_FILES+=usr/share/terminfo/t/tek4105
OLD_FILES+=usr/share/terminfo/t/tek4105-30
OLD_FILES+=usr/share/terminfo/t/tek4105a
OLD_FILES+=usr/share/terminfo/t/tek4106brl
OLD_FILES+=usr/share/terminfo/t/tek4107
OLD_FILES+=usr/share/terminfo/t/tek4107brl
OLD_FILES+=usr/share/terminfo/t/tek4109
OLD_FILES+=usr/share/terminfo/t/tek4109brl
OLD_FILES+=usr/share/terminfo/t/tek4112
OLD_FILES+=usr/share/terminfo/t/tek4112-5
OLD_FILES+=usr/share/terminfo/t/tek4112-nd
OLD_FILES+=usr/share/terminfo/t/tek4113
OLD_FILES+=usr/share/terminfo/t/tek4113-34
OLD_FILES+=usr/share/terminfo/t/tek4113-nd
OLD_FILES+=usr/share/terminfo/t/tek4114
OLD_FILES+=usr/share/terminfo/t/tek4115
OLD_FILES+=usr/share/terminfo/t/tek4125
OLD_FILES+=usr/share/terminfo/t/tek4205
OLD_FILES+=usr/share/terminfo/t/tek4207
OLD_FILES+=usr/share/terminfo/t/tek4207-s
OLD_FILES+=usr/share/terminfo/t/tek4404
OLD_FILES+=usr/share/terminfo/t/teken
OLD_FILES+=usr/share/terminfo/t/teleray
OLD_FILES+=usr/share/terminfo/t/teletec
OLD_FILES+=usr/share/terminfo/t/teraterm
OLD_FILES+=usr/share/terminfo/t/teraterm-256color
OLD_FILES+=usr/share/terminfo/t/teraterm2.3
OLD_FILES+=usr/share/terminfo/t/teraterm4.59
OLD_FILES+=usr/share/terminfo/t/teraterm4.97
OLD_FILES+=usr/share/terminfo/t/terminator
OLD_FILES+=usr/share/terminfo/t/terminet
OLD_FILES+=usr/share/terminfo/t/terminet1200
OLD_FILES+=usr/share/terminfo/t/terminet300
OLD_FILES+=usr/share/terminfo/t/terminology
OLD_FILES+=usr/share/terminfo/t/terminology-0.6.1
OLD_FILES+=usr/share/terminfo/t/terminology-1.0.0
OLD_FILES+=usr/share/terminfo/t/terminology-1.8.1
OLD_FILES+=usr/share/terminfo/t/termite
OLD_FILES+=usr/share/terminfo/t/tgtelnet
OLD_FILES+=usr/share/terminfo/t/ti700
OLD_FILES+=usr/share/terminfo/t/ti703
OLD_FILES+=usr/share/terminfo/t/ti703-w
OLD_FILES+=usr/share/terminfo/t/ti707
OLD_FILES+=usr/share/terminfo/t/ti707-w
OLD_FILES+=usr/share/terminfo/t/ti733
OLD_FILES+=usr/share/terminfo/t/ti735
OLD_FILES+=usr/share/terminfo/t/ti745
OLD_FILES+=usr/share/terminfo/t/ti800
OLD_FILES+=usr/share/terminfo/t/ti916
OLD_FILES+=usr/share/terminfo/t/ti916-132
OLD_FILES+=usr/share/terminfo/t/ti916-220-7
OLD_FILES+=usr/share/terminfo/t/ti916-220-8
OLD_FILES+=usr/share/terminfo/t/ti916-8
OLD_FILES+=usr/share/terminfo/t/ti916-8-132
OLD_FILES+=usr/share/terminfo/t/ti924
OLD_FILES+=usr/share/terminfo/t/ti924-8
OLD_FILES+=usr/share/terminfo/t/ti924-8w
OLD_FILES+=usr/share/terminfo/t/ti924w
OLD_FILES+=usr/share/terminfo/t/ti926
OLD_FILES+=usr/share/terminfo/t/ti926-8
OLD_FILES+=usr/share/terminfo/t/ti928
OLD_FILES+=usr/share/terminfo/t/ti928-8
OLD_FILES+=usr/share/terminfo/t/ti931
OLD_FILES+=usr/share/terminfo/t/ti_ansi
OLD_FILES+=usr/share/terminfo/t/tkterm
OLD_FILES+=usr/share/terminfo/t/tmux
OLD_FILES+=usr/share/terminfo/t/tmux-256color
OLD_FILES+=usr/share/terminfo/t/tmux-direct
OLD_FILES+=usr/share/terminfo/t/tn1200
OLD_FILES+=usr/share/terminfo/t/tn300
OLD_FILES+=usr/share/terminfo/t/trs16
OLD_FILES+=usr/share/terminfo/t/trs2
OLD_FILES+=usr/share/terminfo/t/trs80II
OLD_FILES+=usr/share/terminfo/t/trsII
OLD_FILES+=usr/share/terminfo/t/ts-1
OLD_FILES+=usr/share/terminfo/t/ts-1p
OLD_FILES+=usr/share/terminfo/t/ts1
OLD_FILES+=usr/share/terminfo/t/ts100
OLD_FILES+=usr/share/terminfo/t/ts100-ctxt
OLD_FILES+=usr/share/terminfo/t/ts100-sp
OLD_FILES+=usr/share/terminfo/t/ts1p
OLD_FILES+=usr/share/terminfo/t/tt
OLD_FILES+=usr/share/terminfo/t/tt505-22
OLD_FILES+=usr/share/terminfo/t/tt52
OLD_FILES+=usr/share/terminfo/t/tty33
OLD_FILES+=usr/share/terminfo/t/tty35
OLD_FILES+=usr/share/terminfo/t/tty37
OLD_FILES+=usr/share/terminfo/t/tty40
OLD_FILES+=usr/share/terminfo/t/tty43
OLD_FILES+=usr/share/terminfo/t/tty4420
OLD_FILES+=usr/share/terminfo/t/tty4424
OLD_FILES+=usr/share/terminfo/t/tty4424-1
OLD_FILES+=usr/share/terminfo/t/tty4424m
OLD_FILES+=usr/share/terminfo/t/tty4426
OLD_FILES+=usr/share/terminfo/t/tty5410
OLD_FILES+=usr/share/terminfo/t/tty5410-w
OLD_FILES+=usr/share/terminfo/t/tty5410v1
OLD_FILES+=usr/share/terminfo/t/tty5410v1-w
OLD_FILES+=usr/share/terminfo/t/tty5420
OLD_FILES+=usr/share/terminfo/t/tty5420+nl
OLD_FILES+=usr/share/terminfo/t/tty5420-nl
OLD_FILES+=usr/share/terminfo/t/tty5420-rv
OLD_FILES+=usr/share/terminfo/t/tty5420-rv-nl
OLD_FILES+=usr/share/terminfo/t/tty5420-w
OLD_FILES+=usr/share/terminfo/t/tty5420-w-nl
OLD_FILES+=usr/share/terminfo/t/tty5420-w-rv
OLD_FILES+=usr/share/terminfo/t/tty5420-w-rv-n
OLD_FILES+=usr/share/terminfo/t/tty5425
OLD_FILES+=usr/share/terminfo/t/tty5425-nl
OLD_FILES+=usr/share/terminfo/t/tty5425-w
OLD_FILES+=usr/share/terminfo/t/tty5620
OLD_FILES+=usr/share/terminfo/t/tty5620-1
OLD_FILES+=usr/share/terminfo/t/tty5620-24
OLD_FILES+=usr/share/terminfo/t/tty5620-34
OLD_FILES+=usr/share/terminfo/t/tty5620-s
OLD_FILES+=usr/share/terminfo/t/ttydmd
OLD_FILES+=usr/share/terminfo/t/tvi803
OLD_FILES+=usr/share/terminfo/t/tvi9065
OLD_FILES+=usr/share/terminfo/t/tvi910
OLD_FILES+=usr/share/terminfo/t/tvi910+
OLD_FILES+=usr/share/terminfo/t/tvi912
OLD_FILES+=usr/share/terminfo/t/tvi912b
OLD_FILES+=usr/share/terminfo/t/tvi912b+2p
OLD_FILES+=usr/share/terminfo/t/tvi912b+dim
OLD_FILES+=usr/share/terminfo/t/tvi912b+mc
OLD_FILES+=usr/share/terminfo/t/tvi912b+printer
OLD_FILES+=usr/share/terminfo/t/tvi912b+vb
OLD_FILES+=usr/share/terminfo/t/tvi912b-2p
OLD_FILES+=usr/share/terminfo/t/tvi912b-2p-mc
OLD_FILES+=usr/share/terminfo/t/tvi912b-2p-p
OLD_FILES+=usr/share/terminfo/t/tvi912b-2p-unk
OLD_FILES+=usr/share/terminfo/t/tvi912b-mc
OLD_FILES+=usr/share/terminfo/t/tvi912b-mc-2p
OLD_FILES+=usr/share/terminfo/t/tvi912b-mc-vb
OLD_FILES+=usr/share/terminfo/t/tvi912b-p
OLD_FILES+=usr/share/terminfo/t/tvi912b-p-2p
OLD_FILES+=usr/share/terminfo/t/tvi912b-p-vb
OLD_FILES+=usr/share/terminfo/t/tvi912b-unk
OLD_FILES+=usr/share/terminfo/t/tvi912b-unk-2p
OLD_FILES+=usr/share/terminfo/t/tvi912b-unk-vb
OLD_FILES+=usr/share/terminfo/t/tvi912b-vb
OLD_FILES+=usr/share/terminfo/t/tvi912b-vb-mc
OLD_FILES+=usr/share/terminfo/t/tvi912b-vb-p
OLD_FILES+=usr/share/terminfo/t/tvi912b-vb-unk
OLD_FILES+=usr/share/terminfo/t/tvi912c
OLD_FILES+=usr/share/terminfo/t/tvi912c-2p
OLD_FILES+=usr/share/terminfo/t/tvi912c-2p-mc
OLD_FILES+=usr/share/terminfo/t/tvi912c-2p-p
OLD_FILES+=usr/share/terminfo/t/tvi912c-2p-unk
OLD_FILES+=usr/share/terminfo/t/tvi912c-mc
OLD_FILES+=usr/share/terminfo/t/tvi912c-mc-2p
OLD_FILES+=usr/share/terminfo/t/tvi912c-mc-vb
OLD_FILES+=usr/share/terminfo/t/tvi912c-p
OLD_FILES+=usr/share/terminfo/t/tvi912c-p-2p
OLD_FILES+=usr/share/terminfo/t/tvi912c-p-vb
OLD_FILES+=usr/share/terminfo/t/tvi912c-unk
OLD_FILES+=usr/share/terminfo/t/tvi912c-unk-2p
OLD_FILES+=usr/share/terminfo/t/tvi912c-unk-vb
OLD_FILES+=usr/share/terminfo/t/tvi912c-vb
OLD_FILES+=usr/share/terminfo/t/tvi912c-vb-mc
OLD_FILES+=usr/share/terminfo/t/tvi912c-vb-p
OLD_FILES+=usr/share/terminfo/t/tvi912c-vb-unk
OLD_FILES+=usr/share/terminfo/t/tvi912cc
OLD_FILES+=usr/share/terminfo/t/tvi914
OLD_FILES+=usr/share/terminfo/t/tvi920
OLD_FILES+=usr/share/terminfo/t/tvi920b
OLD_FILES+=usr/share/terminfo/t/tvi920b+fn
OLD_FILES+=usr/share/terminfo/t/tvi920b-2p
OLD_FILES+=usr/share/terminfo/t/tvi920b-2p-mc
OLD_FILES+=usr/share/terminfo/t/tvi920b-2p-p
OLD_FILES+=usr/share/terminfo/t/tvi920b-2p-unk
OLD_FILES+=usr/share/terminfo/t/tvi920b-mc
OLD_FILES+=usr/share/terminfo/t/tvi920b-mc-2p
OLD_FILES+=usr/share/terminfo/t/tvi920b-mc-vb
OLD_FILES+=usr/share/terminfo/t/tvi920b-p
OLD_FILES+=usr/share/terminfo/t/tvi920b-p-2p
OLD_FILES+=usr/share/terminfo/t/tvi920b-p-vb
OLD_FILES+=usr/share/terminfo/t/tvi920b-unk
OLD_FILES+=usr/share/terminfo/t/tvi920b-unk-2p
OLD_FILES+=usr/share/terminfo/t/tvi920b-unk-vb
OLD_FILES+=usr/share/terminfo/t/tvi920b-vb
OLD_FILES+=usr/share/terminfo/t/tvi920b-vb-mc
OLD_FILES+=usr/share/terminfo/t/tvi920b-vb-p
OLD_FILES+=usr/share/terminfo/t/tvi920b-vb-unk
OLD_FILES+=usr/share/terminfo/t/tvi920c
OLD_FILES+=usr/share/terminfo/t/tvi920c-2p
OLD_FILES+=usr/share/terminfo/t/tvi920c-2p-mc
OLD_FILES+=usr/share/terminfo/t/tvi920c-2p-p
OLD_FILES+=usr/share/terminfo/t/tvi920c-2p-unk
OLD_FILES+=usr/share/terminfo/t/tvi920c-mc
OLD_FILES+=usr/share/terminfo/t/tvi920c-mc-2p
OLD_FILES+=usr/share/terminfo/t/tvi920c-mc-vb
OLD_FILES+=usr/share/terminfo/t/tvi920c-p
OLD_FILES+=usr/share/terminfo/t/tvi920c-p-2p
OLD_FILES+=usr/share/terminfo/t/tvi920c-p-vb
OLD_FILES+=usr/share/terminfo/t/tvi920c-unk
OLD_FILES+=usr/share/terminfo/t/tvi920c-unk-2p
OLD_FILES+=usr/share/terminfo/t/tvi920c-unk-vb
OLD_FILES+=usr/share/terminfo/t/tvi920c-vb
OLD_FILES+=usr/share/terminfo/t/tvi920c-vb-mc
OLD_FILES+=usr/share/terminfo/t/tvi920c-vb-p
OLD_FILES+=usr/share/terminfo/t/tvi920c-vb-unk
OLD_FILES+=usr/share/terminfo/t/tvi921
OLD_FILES+=usr/share/terminfo/t/tvi924
OLD_FILES+=usr/share/terminfo/t/tvi925
OLD_FILES+=usr/share/terminfo/t/tvi925-hi
OLD_FILES+=usr/share/terminfo/t/tvi92B
OLD_FILES+=usr/share/terminfo/t/tvi92D
OLD_FILES+=usr/share/terminfo/t/tvi950
OLD_FILES+=usr/share/terminfo/t/tvi950-2p
OLD_FILES+=usr/share/terminfo/t/tvi950-4p
OLD_FILES+=usr/share/terminfo/t/tvi950-rv
OLD_FILES+=usr/share/terminfo/t/tvi950-rv-2p
OLD_FILES+=usr/share/terminfo/t/tvi950-rv-4p
OLD_FILES+=usr/share/terminfo/t/tvi955
OLD_FILES+=usr/share/terminfo/t/tvi955-hb
OLD_FILES+=usr/share/terminfo/t/tvi955-w
OLD_FILES+=usr/share/terminfo/t/tvi970
OLD_FILES+=usr/share/terminfo/t/tvi970-2p
OLD_FILES+=usr/share/terminfo/t/tvi970-vb
OLD_FILES+=usr/share/terminfo/t/tvipt
OLD_FILES+=usr/share/terminfo/t/tw100
OLD_FILES+=usr/share/terminfo/t/tw52
OLD_FILES+=usr/share/terminfo/t/tw52-color
OLD_FILES+=usr/share/terminfo/t/tw52-m
OLD_FILES+=usr/share/terminfo/t/tws-generic
OLD_FILES+=usr/share/terminfo/t/tws2102-sna
OLD_FILES+=usr/share/terminfo/t/tws2103
OLD_FILES+=usr/share/terminfo/t/tws2103-sna
OLD_DIRS+=usr/share/terminfo/t/
OLD_FILES+=usr/share/terminfo/u/ultima2
OLD_FILES+=usr/share/terminfo/u/ultimaII
OLD_FILES+=usr/share/terminfo/u/uniterm
OLD_FILES+=usr/share/terminfo/u/uniterm49
OLD_FILES+=usr/share/terminfo/u/unixpc
OLD_FILES+=usr/share/terminfo/u/unknown
OLD_FILES+=usr/share/terminfo/u/uts30
OLD_FILES+=usr/share/terminfo/u/uwin
OLD_DIRS+=usr/share/terminfo/u/
OLD_FILES+=usr/share/terminfo/v/v200-nam
OLD_FILES+=usr/share/terminfo/v/v320n
OLD_FILES+=usr/share/terminfo/v/v3220
OLD_FILES+=usr/share/terminfo/v/v5410
OLD_FILES+=usr/share/terminfo/v/vanilla
OLD_FILES+=usr/share/terminfo/v/vapple
OLD_FILES+=usr/share/terminfo/v/vc103
OLD_FILES+=usr/share/terminfo/v/vc203
OLD_FILES+=usr/share/terminfo/v/vc303
OLD_FILES+=usr/share/terminfo/v/vc303a
OLD_FILES+=usr/share/terminfo/v/vc403a
OLD_FILES+=usr/share/terminfo/v/vc404
OLD_FILES+=usr/share/terminfo/v/vc404-s
OLD_FILES+=usr/share/terminfo/v/vc414
OLD_FILES+=usr/share/terminfo/v/vc414h
OLD_FILES+=usr/share/terminfo/v/vc415
OLD_FILES+=usr/share/terminfo/v/venix
OLD_FILES+=usr/share/terminfo/v/versaterm
OLD_FILES+=usr/share/terminfo/v/vi200
OLD_FILES+=usr/share/terminfo/v/vi200-f
OLD_FILES+=usr/share/terminfo/v/vi200-rv
OLD_FILES+=usr/share/terminfo/v/vi300
OLD_FILES+=usr/share/terminfo/v/vi300-old
OLD_FILES+=usr/share/terminfo/v/vi50
OLD_FILES+=usr/share/terminfo/v/vi500
OLD_FILES+=usr/share/terminfo/v/vi50adm
OLD_FILES+=usr/share/terminfo/v/vi55
OLD_FILES+=usr/share/terminfo/v/vi550
OLD_FILES+=usr/share/terminfo/v/vi603
OLD_FILES+=usr/share/terminfo/v/viewdata
OLD_FILES+=usr/share/terminfo/v/viewdata-o
OLD_FILES+=usr/share/terminfo/v/viewdata-rv
OLD_FILES+=usr/share/terminfo/v/viewpoint
OLD_FILES+=usr/share/terminfo/v/viewpoint3a+
OLD_FILES+=usr/share/terminfo/v/viewpoint60
OLD_FILES+=usr/share/terminfo/v/viewpoint90
OLD_FILES+=usr/share/terminfo/v/vip
OLD_FILES+=usr/share/terminfo/v/vip-H
OLD_FILES+=usr/share/terminfo/v/vip-Hw
OLD_FILES+=usr/share/terminfo/v/vip-w
OLD_FILES+=usr/share/terminfo/v/vip7800-H
OLD_FILES+=usr/share/terminfo/v/vip7800-Hw
OLD_FILES+=usr/share/terminfo/v/vip7800-w
OLD_FILES+=usr/share/terminfo/v/visa50
OLD_FILES+=usr/share/terminfo/v/visual603
OLD_FILES+=usr/share/terminfo/v/vitty
OLD_FILES+=usr/share/terminfo/v/vk100
OLD_FILES+=usr/share/terminfo/v/vp3a+
OLD_FILES+=usr/share/terminfo/v/vp60
OLD_FILES+=usr/share/terminfo/v/vp90
OLD_FILES+=usr/share/terminfo/v/vremote
OLD_FILES+=usr/share/terminfo/v/vs100
OLD_FILES+=usr/share/terminfo/v/vs100-x10
OLD_FILES+=usr/share/terminfo/v/vsc
OLD_FILES+=usr/share/terminfo/v/vscode
OLD_FILES+=usr/share/terminfo/v/vscode-direct
OLD_FILES+=usr/share/terminfo/v/vt-61
OLD_FILES+=usr/share/terminfo/v/vt-utf8
OLD_FILES+=usr/share/terminfo/v/vt100
OLD_FILES+=usr/share/terminfo/v/vt100+
OLD_FILES+=usr/share/terminfo/v/vt100+4bsd
OLD_FILES+=usr/share/terminfo/v/vt100+enq
OLD_FILES+=usr/share/terminfo/v/vt100+fnkeys
OLD_FILES+=usr/share/terminfo/v/vt100+keypad
OLD_FILES+=usr/share/terminfo/v/vt100+pfkeys
OLD_FILES+=usr/share/terminfo/v/vt100-am
OLD_FILES+=usr/share/terminfo/v/vt100-bm
OLD_FILES+=usr/share/terminfo/v/vt100-bm-o
OLD_FILES+=usr/share/terminfo/v/vt100-bot-s
OLD_FILES+=usr/share/terminfo/v/vt100-nam
OLD_FILES+=usr/share/terminfo/v/vt100-nam-w
OLD_FILES+=usr/share/terminfo/v/vt100-nav
OLD_FILES+=usr/share/terminfo/v/vt100-nav-w
OLD_FILES+=usr/share/terminfo/v/vt100-putty
OLD_FILES+=usr/share/terminfo/v/vt100-s
OLD_FILES+=usr/share/terminfo/v/vt100-s-bot
OLD_FILES+=usr/share/terminfo/v/vt100-s-top
OLD_FILES+=usr/share/terminfo/v/vt100-top-s
OLD_FILES+=usr/share/terminfo/v/vt100-vb
OLD_FILES+=usr/share/terminfo/v/vt100-w
OLD_FILES+=usr/share/terminfo/v/vt100-w-am
OLD_FILES+=usr/share/terminfo/v/vt100-w-nam
OLD_FILES+=usr/share/terminfo/v/vt100-w-nav
OLD_FILES+=usr/share/terminfo/v/vt100nam
OLD_FILES+=usr/share/terminfo/v/vt102
OLD_FILES+=usr/share/terminfo/v/vt102+enq
OLD_FILES+=usr/share/terminfo/v/vt102-nsgr
OLD_FILES+=usr/share/terminfo/v/vt102-w
OLD_FILES+=usr/share/terminfo/v/vt125
OLD_FILES+=usr/share/terminfo/v/vt131
OLD_FILES+=usr/share/terminfo/v/vt132
OLD_FILES+=usr/share/terminfo/v/vt200
OLD_FILES+=usr/share/terminfo/v/vt200-8
OLD_FILES+=usr/share/terminfo/v/vt200-8bit
OLD_FILES+=usr/share/terminfo/v/vt200-js
OLD_FILES+=usr/share/terminfo/v/vt200-old
OLD_FILES+=usr/share/terminfo/v/vt200-w
OLD_FILES+=usr/share/terminfo/v/vt220
OLD_FILES+=usr/share/terminfo/v/vt220+cvis
OLD_FILES+=usr/share/terminfo/v/vt220+cvis8
OLD_FILES+=usr/share/terminfo/v/vt220+keypad
OLD_FILES+=usr/share/terminfo/v/vt220+pcedit
OLD_FILES+=usr/share/terminfo/v/vt220+vtedit
OLD_FILES+=usr/share/terminfo/v/vt220-8
OLD_FILES+=usr/share/terminfo/v/vt220-8bit
OLD_FILES+=usr/share/terminfo/v/vt220-base
OLD_FILES+=usr/share/terminfo/v/vt220-js
OLD_FILES+=usr/share/terminfo/v/vt220-nam
OLD_FILES+=usr/share/terminfo/v/vt220-old
OLD_FILES+=usr/share/terminfo/v/vt220-w
OLD_FILES+=usr/share/terminfo/v/vt220d
OLD_FILES+=usr/share/terminfo/v/vt300
OLD_FILES+=usr/share/terminfo/v/vt300-nam
OLD_FILES+=usr/share/terminfo/v/vt300-w
OLD_FILES+=usr/share/terminfo/v/vt300-w-nam
OLD_FILES+=usr/share/terminfo/v/vt320
OLD_FILES+=usr/share/terminfo/v/vt320-k3
OLD_FILES+=usr/share/terminfo/v/vt320-k311
OLD_FILES+=usr/share/terminfo/v/vt320-nam
OLD_FILES+=usr/share/terminfo/v/vt320-w
OLD_FILES+=usr/share/terminfo/v/vt320-w-nam
OLD_FILES+=usr/share/terminfo/v/vt320nam
OLD_FILES+=usr/share/terminfo/v/vt330
OLD_FILES+=usr/share/terminfo/v/vt340
OLD_FILES+=usr/share/terminfo/v/vt400
OLD_FILES+=usr/share/terminfo/v/vt400-24
OLD_FILES+=usr/share/terminfo/v/vt420
OLD_FILES+=usr/share/terminfo/v/vt420+lrmm
OLD_FILES+=usr/share/terminfo/v/vt420f
OLD_FILES+=usr/share/terminfo/v/vt420pc
OLD_FILES+=usr/share/terminfo/v/vt420pcdos
OLD_FILES+=usr/share/terminfo/v/vt50
OLD_FILES+=usr/share/terminfo/v/vt50h
OLD_FILES+=usr/share/terminfo/v/vt510
OLD_FILES+=usr/share/terminfo/v/vt510pc
OLD_FILES+=usr/share/terminfo/v/vt510pcdos
OLD_FILES+=usr/share/terminfo/v/vt52
OLD_FILES+=usr/share/terminfo/v/vt52+keypad
OLD_FILES+=usr/share/terminfo/v/vt52-basic
OLD_FILES+=usr/share/terminfo/v/vt520
OLD_FILES+=usr/share/terminfo/v/vt520ansi
OLD_FILES+=usr/share/terminfo/v/vt525
OLD_FILES+=usr/share/terminfo/v/vt61
OLD_FILES+=usr/share/terminfo/v/vt61.5
OLD_FILES+=usr/share/terminfo/v/vte
OLD_FILES+=usr/share/terminfo/v/vte+pcfkeys
OLD_FILES+=usr/share/terminfo/v/vte-2007
OLD_FILES+=usr/share/terminfo/v/vte-2008
OLD_FILES+=usr/share/terminfo/v/vte-2012
OLD_FILES+=usr/share/terminfo/v/vte-2014
OLD_FILES+=usr/share/terminfo/v/vte-2017
OLD_FILES+=usr/share/terminfo/v/vte-2018
OLD_FILES+=usr/share/terminfo/v/vte-256color
OLD_FILES+=usr/share/terminfo/v/vte-direct
OLD_FILES+=usr/share/terminfo/v/vtnt
OLD_FILES+=usr/share/terminfo/v/vv100
OLD_FILES+=usr/share/terminfo/v/vwmterm
OLD_DIRS+=usr/share/terminfo/v/
OLD_FILES+=usr/share/terminfo/w/wren
OLD_FILES+=usr/share/terminfo/w/wrenw
OLD_FILES+=usr/share/terminfo/w/wsiris
OLD_FILES+=usr/share/terminfo/w/wsvt25
OLD_FILES+=usr/share/terminfo/w/wsvt25m
OLD_FILES+=usr/share/terminfo/w/wy-75ap
OLD_FILES+=usr/share/terminfo/w/wy-99fgt
OLD_FILES+=usr/share/terminfo/w/wy-99fgta
OLD_FILES+=usr/share/terminfo/w/wy100
OLD_FILES+=usr/share/terminfo/w/wy100q
OLD_FILES+=usr/share/terminfo/w/wy120
OLD_FILES+=usr/share/terminfo/w/wy120-25
OLD_FILES+=usr/share/terminfo/w/wy120-25-w
OLD_FILES+=usr/share/terminfo/w/wy120-vb
OLD_FILES+=usr/share/terminfo/w/wy120-w
OLD_FILES+=usr/share/terminfo/w/wy120-w-vb
OLD_FILES+=usr/share/terminfo/w/wy120-wvb
OLD_FILES+=usr/share/terminfo/w/wy150
OLD_FILES+=usr/share/terminfo/w/wy150-25
OLD_FILES+=usr/share/terminfo/w/wy150-25-w
OLD_FILES+=usr/share/terminfo/w/wy150-vb
OLD_FILES+=usr/share/terminfo/w/wy150-w
OLD_FILES+=usr/share/terminfo/w/wy150-w-vb
OLD_FILES+=usr/share/terminfo/w/wy160
OLD_FILES+=usr/share/terminfo/w/wy160-25
OLD_FILES+=usr/share/terminfo/w/wy160-25-w
OLD_FILES+=usr/share/terminfo/w/wy160-42
OLD_FILES+=usr/share/terminfo/w/wy160-42-w
OLD_FILES+=usr/share/terminfo/w/wy160-43
OLD_FILES+=usr/share/terminfo/w/wy160-43-w
OLD_FILES+=usr/share/terminfo/w/wy160-tek
OLD_FILES+=usr/share/terminfo/w/wy160-vb
OLD_FILES+=usr/share/terminfo/w/wy160-w
OLD_FILES+=usr/share/terminfo/w/wy160-w-vb
OLD_FILES+=usr/share/terminfo/w/wy160-wvb
OLD_FILES+=usr/share/terminfo/w/wy185
OLD_FILES+=usr/share/terminfo/w/wy185-24
OLD_FILES+=usr/share/terminfo/w/wy185-vb
OLD_FILES+=usr/share/terminfo/w/wy185-w
OLD_FILES+=usr/share/terminfo/w/wy185-wvb
OLD_FILES+=usr/share/terminfo/w/wy30
OLD_FILES+=usr/share/terminfo/w/wy30-mc
OLD_FILES+=usr/share/terminfo/w/wy30-vb
OLD_FILES+=usr/share/terminfo/w/wy325
OLD_FILES+=usr/share/terminfo/w/wy325-25
OLD_FILES+=usr/share/terminfo/w/wy325-25w
OLD_FILES+=usr/share/terminfo/w/wy325-42
OLD_FILES+=usr/share/terminfo/w/wy325-42w
OLD_FILES+=usr/share/terminfo/w/wy325-42w-vb
OLD_FILES+=usr/share/terminfo/w/wy325-42wvb
OLD_FILES+=usr/share/terminfo/w/wy325-43
OLD_FILES+=usr/share/terminfo/w/wy325-43w
OLD_FILES+=usr/share/terminfo/w/wy325-43w-vb
OLD_FILES+=usr/share/terminfo/w/wy325-43wvb
OLD_FILES+=usr/share/terminfo/w/wy325-80
OLD_FILES+=usr/share/terminfo/w/wy325-vb
OLD_FILES+=usr/share/terminfo/w/wy325-w
OLD_FILES+=usr/share/terminfo/w/wy325-w-vb
OLD_FILES+=usr/share/terminfo/w/wy325-wvb
OLD_FILES+=usr/share/terminfo/w/wy325w-24
OLD_FILES+=usr/share/terminfo/w/wy350
OLD_FILES+=usr/share/terminfo/w/wy350-vb
OLD_FILES+=usr/share/terminfo/w/wy350-w
OLD_FILES+=usr/share/terminfo/w/wy350-wvb
OLD_FILES+=usr/share/terminfo/w/wy370
OLD_FILES+=usr/share/terminfo/w/wy370-101k
OLD_FILES+=usr/share/terminfo/w/wy370-105k
OLD_FILES+=usr/share/terminfo/w/wy370-EPC
OLD_FILES+=usr/share/terminfo/w/wy370-nk
OLD_FILES+=usr/share/terminfo/w/wy370-rv
OLD_FILES+=usr/share/terminfo/w/wy370-tek
OLD_FILES+=usr/share/terminfo/w/wy370-vb
OLD_FILES+=usr/share/terminfo/w/wy370-w
OLD_FILES+=usr/share/terminfo/w/wy370-wvb
OLD_FILES+=usr/share/terminfo/w/wy50
OLD_FILES+=usr/share/terminfo/w/wy50-mc
OLD_FILES+=usr/share/terminfo/w/wy50-vb
OLD_FILES+=usr/share/terminfo/w/wy50-w
OLD_FILES+=usr/share/terminfo/w/wy50-wvb
OLD_FILES+=usr/share/terminfo/w/wy520
OLD_FILES+=usr/share/terminfo/w/wy520-24
OLD_FILES+=usr/share/terminfo/w/wy520-36
OLD_FILES+=usr/share/terminfo/w/wy520-36pc
OLD_FILES+=usr/share/terminfo/w/wy520-36w
OLD_FILES+=usr/share/terminfo/w/wy520-36wpc
OLD_FILES+=usr/share/terminfo/w/wy520-48
OLD_FILES+=usr/share/terminfo/w/wy520-48pc
OLD_FILES+=usr/share/terminfo/w/wy520-48w
OLD_FILES+=usr/share/terminfo/w/wy520-48wpc
OLD_FILES+=usr/share/terminfo/w/wy520-epc
OLD_FILES+=usr/share/terminfo/w/wy520-epc-24
OLD_FILES+=usr/share/terminfo/w/wy520-epc-vb
OLD_FILES+=usr/share/terminfo/w/wy520-epc-w
OLD_FILES+=usr/share/terminfo/w/wy520-epc-wvb
OLD_FILES+=usr/share/terminfo/w/wy520-vb
OLD_FILES+=usr/share/terminfo/w/wy520-w
OLD_FILES+=usr/share/terminfo/w/wy520-wvb
OLD_FILES+=usr/share/terminfo/w/wy60
OLD_FILES+=usr/share/terminfo/w/wy60-25
OLD_FILES+=usr/share/terminfo/w/wy60-25-w
OLD_FILES+=usr/share/terminfo/w/wy60-316X
OLD_FILES+=usr/share/terminfo/w/wy60-42
OLD_FILES+=usr/share/terminfo/w/wy60-42-w
OLD_FILES+=usr/share/terminfo/w/wy60-43
OLD_FILES+=usr/share/terminfo/w/wy60-43-w
OLD_FILES+=usr/share/terminfo/w/wy60-AT
OLD_FILES+=usr/share/terminfo/w/wy60-PC
OLD_FILES+=usr/share/terminfo/w/wy60-vb
OLD_FILES+=usr/share/terminfo/w/wy60-w
OLD_FILES+=usr/share/terminfo/w/wy60-w-vb
OLD_FILES+=usr/share/terminfo/w/wy60-wvb
OLD_FILES+=usr/share/terminfo/w/wy75
OLD_FILES+=usr/share/terminfo/w/wy75-mc
OLD_FILES+=usr/share/terminfo/w/wy75-vb
OLD_FILES+=usr/share/terminfo/w/wy75-w
OLD_FILES+=usr/share/terminfo/w/wy75-wvb
OLD_FILES+=usr/share/terminfo/w/wy75ap
OLD_FILES+=usr/share/terminfo/w/wy85
OLD_FILES+=usr/share/terminfo/w/wy85-8bit
OLD_FILES+=usr/share/terminfo/w/wy85-vb
OLD_FILES+=usr/share/terminfo/w/wy85-w
OLD_FILES+=usr/share/terminfo/w/wy85-wvb
OLD_FILES+=usr/share/terminfo/w/wy99-ansi
OLD_FILES+=usr/share/terminfo/w/wy99a-ansi
OLD_FILES+=usr/share/terminfo/w/wy99f
OLD_FILES+=usr/share/terminfo/w/wy99fa
OLD_FILES+=usr/share/terminfo/w/wy99fgt
OLD_FILES+=usr/share/terminfo/w/wy99fgta
OLD_FILES+=usr/share/terminfo/w/wy99gt
OLD_FILES+=usr/share/terminfo/w/wy99gt-25
OLD_FILES+=usr/share/terminfo/w/wy99gt-25-w
OLD_FILES+=usr/share/terminfo/w/wy99gt-tek
OLD_FILES+=usr/share/terminfo/w/wy99gt-vb
OLD_FILES+=usr/share/terminfo/w/wy99gt-w
OLD_FILES+=usr/share/terminfo/w/wy99gt-w-vb
OLD_FILES+=usr/share/terminfo/w/wy99gt-wvb
OLD_FILES+=usr/share/terminfo/w/wyse-325
OLD_FILES+=usr/share/terminfo/w/wyse-75ap
OLD_FILES+=usr/share/terminfo/w/wyse-vp
OLD_FILES+=usr/share/terminfo/w/wyse120
OLD_FILES+=usr/share/terminfo/w/wyse120-25
OLD_FILES+=usr/share/terminfo/w/wyse120-25-w
OLD_FILES+=usr/share/terminfo/w/wyse120-vb
OLD_FILES+=usr/share/terminfo/w/wyse120-w
OLD_FILES+=usr/share/terminfo/w/wyse120-wvb
OLD_FILES+=usr/share/terminfo/w/wyse150
OLD_FILES+=usr/share/terminfo/w/wyse150-25
OLD_FILES+=usr/share/terminfo/w/wyse150-25-w
OLD_FILES+=usr/share/terminfo/w/wyse150-vb
OLD_FILES+=usr/share/terminfo/w/wyse150-w
OLD_FILES+=usr/share/terminfo/w/wyse150-w-vb
OLD_FILES+=usr/share/terminfo/w/wyse160
OLD_FILES+=usr/share/terminfo/w/wyse160-25
OLD_FILES+=usr/share/terminfo/w/wyse160-25-w
OLD_FILES+=usr/share/terminfo/w/wyse160-42
OLD_FILES+=usr/share/terminfo/w/wyse160-42-w
OLD_FILES+=usr/share/terminfo/w/wyse160-43
OLD_FILES+=usr/share/terminfo/w/wyse160-43-w
OLD_FILES+=usr/share/terminfo/w/wyse160-vb
OLD_FILES+=usr/share/terminfo/w/wyse160-w
OLD_FILES+=usr/share/terminfo/w/wyse160-wvb
OLD_FILES+=usr/share/terminfo/w/wyse185
OLD_FILES+=usr/share/terminfo/w/wyse185-24
OLD_FILES+=usr/share/terminfo/w/wyse185-vb
OLD_FILES+=usr/share/terminfo/w/wyse185-w
OLD_FILES+=usr/share/terminfo/w/wyse185-wvb
OLD_FILES+=usr/share/terminfo/w/wyse30
OLD_FILES+=usr/share/terminfo/w/wyse30-mc
OLD_FILES+=usr/share/terminfo/w/wyse30-vb
OLD_FILES+=usr/share/terminfo/w/wyse325
OLD_FILES+=usr/share/terminfo/w/wyse325-25
OLD_FILES+=usr/share/terminfo/w/wyse325-25w
OLD_FILES+=usr/share/terminfo/w/wyse325-42
OLD_FILES+=usr/share/terminfo/w/wyse325-42w
OLD_FILES+=usr/share/terminfo/w/wyse325-43
OLD_FILES+=usr/share/terminfo/w/wyse325-43w
OLD_FILES+=usr/share/terminfo/w/wyse325-vb
OLD_FILES+=usr/share/terminfo/w/wyse325-w
OLD_FILES+=usr/share/terminfo/w/wyse325-wvb
OLD_FILES+=usr/share/terminfo/w/wyse350
OLD_FILES+=usr/share/terminfo/w/wyse350-vb
OLD_FILES+=usr/share/terminfo/w/wyse350-w
OLD_FILES+=usr/share/terminfo/w/wyse350-wvb
OLD_FILES+=usr/share/terminfo/w/wyse370
OLD_FILES+=usr/share/terminfo/w/wyse50
OLD_FILES+=usr/share/terminfo/w/wyse50-mc
OLD_FILES+=usr/share/terminfo/w/wyse50-vb
OLD_FILES+=usr/share/terminfo/w/wyse50-w
OLD_FILES+=usr/share/terminfo/w/wyse50-wvb
OLD_FILES+=usr/share/terminfo/w/wyse520
OLD_FILES+=usr/share/terminfo/w/wyse520-24
OLD_FILES+=usr/share/terminfo/w/wyse520-36
OLD_FILES+=usr/share/terminfo/w/wyse520-36pc
OLD_FILES+=usr/share/terminfo/w/wyse520-36w
OLD_FILES+=usr/share/terminfo/w/wyse520-36wpc
OLD_FILES+=usr/share/terminfo/w/wyse520-48
OLD_FILES+=usr/share/terminfo/w/wyse520-48pc
OLD_FILES+=usr/share/terminfo/w/wyse520-48w
OLD_FILES+=usr/share/terminfo/w/wyse520-48wpc
OLD_FILES+=usr/share/terminfo/w/wyse520-epc
OLD_FILES+=usr/share/terminfo/w/wyse520-epc-w
OLD_FILES+=usr/share/terminfo/w/wyse520-p-wvb
OLD_FILES+=usr/share/terminfo/w/wyse520-pc-24
OLD_FILES+=usr/share/terminfo/w/wyse520-pc-vb
OLD_FILES+=usr/share/terminfo/w/wyse520-vb
OLD_FILES+=usr/share/terminfo/w/wyse520-w
OLD_FILES+=usr/share/terminfo/w/wyse520-wvb
OLD_FILES+=usr/share/terminfo/w/wyse60
OLD_FILES+=usr/share/terminfo/w/wyse60-25
OLD_FILES+=usr/share/terminfo/w/wyse60-25-w
OLD_FILES+=usr/share/terminfo/w/wyse60-316X
OLD_FILES+=usr/share/terminfo/w/wyse60-42
OLD_FILES+=usr/share/terminfo/w/wyse60-42-w
OLD_FILES+=usr/share/terminfo/w/wyse60-43
OLD_FILES+=usr/share/terminfo/w/wyse60-43-w
OLD_FILES+=usr/share/terminfo/w/wyse60-AT
OLD_FILES+=usr/share/terminfo/w/wyse60-PC
OLD_FILES+=usr/share/terminfo/w/wyse60-vb
OLD_FILES+=usr/share/terminfo/w/wyse60-w
OLD_FILES+=usr/share/terminfo/w/wyse60-wvb
OLD_FILES+=usr/share/terminfo/w/wyse75
OLD_FILES+=usr/share/terminfo/w/wyse75-mc
OLD_FILES+=usr/share/terminfo/w/wyse75-vb
OLD_FILES+=usr/share/terminfo/w/wyse75-w
OLD_FILES+=usr/share/terminfo/w/wyse75-wvb
OLD_FILES+=usr/share/terminfo/w/wyse75ap
OLD_FILES+=usr/share/terminfo/w/wyse85
OLD_FILES+=usr/share/terminfo/w/wyse85-8bit
OLD_FILES+=usr/share/terminfo/w/wyse85-vb
OLD_FILES+=usr/share/terminfo/w/wyse85-w
OLD_FILES+=usr/share/terminfo/w/wyse85-wvb
OLD_FILES+=usr/share/terminfo/w/wyse99gt
OLD_FILES+=usr/share/terminfo/w/wyse99gt-25
OLD_FILES+=usr/share/terminfo/w/wyse99gt-25-w
OLD_FILES+=usr/share/terminfo/w/wyse99gt-vb
OLD_FILES+=usr/share/terminfo/w/wyse99gt-w
OLD_FILES+=usr/share/terminfo/w/wyse99gt-wvb
OLD_DIRS+=usr/share/terminfo/w/
OLD_FILES+=usr/share/terminfo/x/x10term
OLD_FILES+=usr/share/terminfo/x/x1700
OLD_FILES+=usr/share/terminfo/x/x1700-lm
OLD_FILES+=usr/share/terminfo/x/x1720
OLD_FILES+=usr/share/terminfo/x/x1750
OLD_FILES+=usr/share/terminfo/x/x68k
OLD_FILES+=usr/share/terminfo/x/x68k-ite
OLD_FILES+=usr/share/terminfo/x/x820
OLD_FILES+=usr/share/terminfo/x/xdku
OLD_FILES+=usr/share/terminfo/x/xenix
OLD_FILES+=usr/share/terminfo/x/xerox
OLD_FILES+=usr/share/terminfo/x/xerox-lm
OLD_FILES+=usr/share/terminfo/x/xerox1720
OLD_FILES+=usr/share/terminfo/x/xerox820
OLD_FILES+=usr/share/terminfo/x/xfce
OLD_FILES+=usr/share/terminfo/x/xiterm
OLD_FILES+=usr/share/terminfo/x/xl83
OLD_FILES+=usr/share/terminfo/x/xnuppc
OLD_FILES+=usr/share/terminfo/x/xnuppc+100x37
OLD_FILES+=usr/share/terminfo/x/xnuppc+112x37
OLD_FILES+=usr/share/terminfo/x/xnuppc+128x40
OLD_FILES+=usr/share/terminfo/x/xnuppc+128x48
OLD_FILES+=usr/share/terminfo/x/xnuppc+144x48
OLD_FILES+=usr/share/terminfo/x/xnuppc+160x64
OLD_FILES+=usr/share/terminfo/x/xnuppc+200x64
OLD_FILES+=usr/share/terminfo/x/xnuppc+200x75
OLD_FILES+=usr/share/terminfo/x/xnuppc+256x96
OLD_FILES+=usr/share/terminfo/x/xnuppc+80x25
OLD_FILES+=usr/share/terminfo/x/xnuppc+80x30
OLD_FILES+=usr/share/terminfo/x/xnuppc+90x30
OLD_FILES+=usr/share/terminfo/x/xnuppc+b
OLD_FILES+=usr/share/terminfo/x/xnuppc+basic
OLD_FILES+=usr/share/terminfo/x/xnuppc+c
OLD_FILES+=usr/share/terminfo/x/xnuppc+f
OLD_FILES+=usr/share/terminfo/x/xnuppc+f2
OLD_FILES+=usr/share/terminfo/x/xnuppc-100x37
OLD_FILES+=usr/share/terminfo/x/xnuppc-100x37-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-112x37
OLD_FILES+=usr/share/terminfo/x/xnuppc-112x37-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-128x40
OLD_FILES+=usr/share/terminfo/x/xnuppc-128x40-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-128x48
OLD_FILES+=usr/share/terminfo/x/xnuppc-128x48-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-144x48
OLD_FILES+=usr/share/terminfo/x/xnuppc-144x48-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-160x64
OLD_FILES+=usr/share/terminfo/x/xnuppc-160x64-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-200x64
OLD_FILES+=usr/share/terminfo/x/xnuppc-200x64-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-200x75
OLD_FILES+=usr/share/terminfo/x/xnuppc-200x75-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-256x96
OLD_FILES+=usr/share/terminfo/x/xnuppc-256x96-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-80x25
OLD_FILES+=usr/share/terminfo/x/xnuppc-80x25-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-80x30
OLD_FILES+=usr/share/terminfo/x/xnuppc-80x30-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-90x30
OLD_FILES+=usr/share/terminfo/x/xnuppc-90x30-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-b
OLD_FILES+=usr/share/terminfo/x/xnuppc-f
OLD_FILES+=usr/share/terminfo/x/xnuppc-f2
OLD_FILES+=usr/share/terminfo/x/xnuppc-m
OLD_FILES+=usr/share/terminfo/x/xnuppc-m-b
OLD_FILES+=usr/share/terminfo/x/xnuppc-m-f
OLD_FILES+=usr/share/terminfo/x/xnuppc-m-f2
OLD_FILES+=usr/share/terminfo/x/xtalk
OLD_FILES+=usr/share/terminfo/x/xterm
OLD_FILES+=usr/share/terminfo/x/xterm+256color
OLD_FILES+=usr/share/terminfo/x/xterm+256color2
OLD_FILES+=usr/share/terminfo/x/xterm+256setaf
OLD_FILES+=usr/share/terminfo/x/xterm+88color
OLD_FILES+=usr/share/terminfo/x/xterm+88color2
OLD_FILES+=usr/share/terminfo/x/xterm+alt+title
OLD_FILES+=usr/share/terminfo/x/xterm+alt1049
OLD_FILES+=usr/share/terminfo/x/xterm+app
OLD_FILES+=usr/share/terminfo/x/xterm+direct
OLD_FILES+=usr/share/terminfo/x/xterm+direct16
OLD_FILES+=usr/share/terminfo/x/xterm+direct2
OLD_FILES+=usr/share/terminfo/x/xterm+direct256
OLD_FILES+=usr/share/terminfo/x/xterm+edit
OLD_FILES+=usr/share/terminfo/x/xterm+indirect
OLD_FILES+=usr/share/terminfo/x/xterm+kbs
OLD_FILES+=usr/share/terminfo/x/xterm+keypad
OLD_FILES+=usr/share/terminfo/x/xterm+meta
OLD_FILES+=usr/share/terminfo/x/xterm+noalt
OLD_FILES+=usr/share/terminfo/x/xterm+noapp
OLD_FILES+=usr/share/terminfo/x/xterm+nofkeys
OLD_FILES+=usr/share/terminfo/x/xterm+osc104
OLD_FILES+=usr/share/terminfo/x/xterm+pc+edit
OLD_FILES+=usr/share/terminfo/x/xterm+pcc0
OLD_FILES+=usr/share/terminfo/x/xterm+pcc1
OLD_FILES+=usr/share/terminfo/x/xterm+pcc2
OLD_FILES+=usr/share/terminfo/x/xterm+pcc3
OLD_FILES+=usr/share/terminfo/x/xterm+pce2
OLD_FILES+=usr/share/terminfo/x/xterm+pcf0
OLD_FILES+=usr/share/terminfo/x/xterm+pcf2
OLD_FILES+=usr/share/terminfo/x/xterm+pcfkeys
OLD_FILES+=usr/share/terminfo/x/xterm+r6f2
OLD_FILES+=usr/share/terminfo/x/xterm+sl
OLD_FILES+=usr/share/terminfo/x/xterm+sl-twm
OLD_FILES+=usr/share/terminfo/x/xterm+sm+1002
OLD_FILES+=usr/share/terminfo/x/xterm+sm+1003
OLD_FILES+=usr/share/terminfo/x/xterm+sm+1005
OLD_FILES+=usr/share/terminfo/x/xterm+sm+1006
OLD_FILES+=usr/share/terminfo/x/xterm+titlestack
OLD_FILES+=usr/share/terminfo/x/xterm+tmux
OLD_FILES+=usr/share/terminfo/x/xterm+vt+edit
OLD_FILES+=usr/share/terminfo/x/xterm+x10mouse
OLD_FILES+=usr/share/terminfo/x/xterm+x11hilite
OLD_FILES+=usr/share/terminfo/x/xterm+x11mouse
OLD_FILES+=usr/share/terminfo/x/xterm-1002
OLD_FILES+=usr/share/terminfo/x/xterm-1003
OLD_FILES+=usr/share/terminfo/x/xterm-1005
OLD_FILES+=usr/share/terminfo/x/xterm-1006
OLD_FILES+=usr/share/terminfo/x/xterm-16color
OLD_FILES+=usr/share/terminfo/x/xterm-24
OLD_FILES+=usr/share/terminfo/x/xterm-256color
OLD_FILES+=usr/share/terminfo/x/xterm-88color
OLD_FILES+=usr/share/terminfo/x/xterm-8bit
OLD_FILES+=usr/share/terminfo/x/xterm-basic
OLD_FILES+=usr/share/terminfo/x/xterm-bold
OLD_FILES+=usr/share/terminfo/x/xterm-color
OLD_FILES+=usr/share/terminfo/x/xterm-direct
OLD_FILES+=usr/share/terminfo/x/xterm-direct16
OLD_FILES+=usr/share/terminfo/x/xterm-direct2
OLD_FILES+=usr/share/terminfo/x/xterm-direct256
OLD_FILES+=usr/share/terminfo/x/xterm-hp
OLD_FILES+=usr/share/terminfo/x/xterm-mono
OLD_FILES+=usr/share/terminfo/x/xterm-new
OLD_FILES+=usr/share/terminfo/x/xterm-nic
OLD_FILES+=usr/share/terminfo/x/xterm-noapp
OLD_FILES+=usr/share/terminfo/x/xterm-old
OLD_FILES+=usr/share/terminfo/x/xterm-pcolor
OLD_FILES+=usr/share/terminfo/x/xterm-r5
OLD_FILES+=usr/share/terminfo/x/xterm-r6
OLD_FILES+=usr/share/terminfo/x/xterm-sco
OLD_FILES+=usr/share/terminfo/x/xterm-sun
OLD_FILES+=usr/share/terminfo/x/xterm-utf8
OLD_FILES+=usr/share/terminfo/x/xterm-vt220
OLD_FILES+=usr/share/terminfo/x/xterm-vt52
OLD_FILES+=usr/share/terminfo/x/xterm-x10mouse
OLD_FILES+=usr/share/terminfo/x/xterm-x11hilite
OLD_FILES+=usr/share/terminfo/x/xterm-x11mouse
OLD_FILES+=usr/share/terminfo/x/xterm-xf86-v32
OLD_FILES+=usr/share/terminfo/x/xterm-xf86-v33
OLD_FILES+=usr/share/terminfo/x/xterm-xf86-v333
OLD_FILES+=usr/share/terminfo/x/xterm-xf86-v40
OLD_FILES+=usr/share/terminfo/x/xterm-xf86-v43
OLD_FILES+=usr/share/terminfo/x/xterm-xf86-v44
OLD_FILES+=usr/share/terminfo/x/xterm-xfree86
OLD_FILES+=usr/share/terminfo/x/xterm-xi
OLD_FILES+=usr/share/terminfo/x/xterm.js
OLD_FILES+=usr/share/terminfo/x/xterm1
OLD_FILES+=usr/share/terminfo/x/xtermc
OLD_FILES+=usr/share/terminfo/x/xtermm
OLD_FILES+=usr/share/terminfo/x/xterms
OLD_FILES+=usr/share/terminfo/x/xterms-sun
OLD_FILES+=usr/share/terminfo/x/xwsh
OLD_DIRS+=usr/share/terminfo/x/
OLD_FILES+=usr/share/terminfo/z/z-100
OLD_FILES+=usr/share/terminfo/z/z-100bw
OLD_FILES+=usr/share/terminfo/z/z100
OLD_FILES+=usr/share/terminfo/z/z100bw
OLD_FILES+=usr/share/terminfo/z/z110
OLD_FILES+=usr/share/terminfo/z/z110bw
OLD_FILES+=usr/share/terminfo/z/z19
OLD_FILES+=usr/share/terminfo/z/z29
OLD_FILES+=usr/share/terminfo/z/z29a
OLD_FILES+=usr/share/terminfo/z/z29a-kc-bc
OLD_FILES+=usr/share/terminfo/z/z29a-kc-uc
OLD_FILES+=usr/share/terminfo/z/z29a-nkc-bc
OLD_FILES+=usr/share/terminfo/z/z29a-nkc-uc
OLD_FILES+=usr/share/terminfo/z/z29b
OLD_FILES+=usr/share/terminfo/z/z30
OLD_FILES+=usr/share/terminfo/z/z340
OLD_FILES+=usr/share/terminfo/z/z340-nam
OLD_FILES+=usr/share/terminfo/z/z39-a
OLD_FILES+=usr/share/terminfo/z/z39a
OLD_FILES+=usr/share/terminfo/z/z50
OLD_FILES+=usr/share/terminfo/z/z8001
OLD_FILES+=usr/share/terminfo/z/zen30
OLD_FILES+=usr/share/terminfo/z/zen50
OLD_FILES+=usr/share/terminfo/z/zen8001
OLD_FILES+=usr/share/terminfo/z/zenith
OLD_FILES+=usr/share/terminfo/z/zenith29
OLD_FILES+=usr/share/terminfo/z/zenith39-a
OLD_FILES+=usr/share/terminfo/z/zenith39-ansi
OLD_FILES+=usr/share/terminfo/z/zt-1
OLD_FILES+=usr/share/terminfo/z/ztx
OLD_FILES+=usr/share/terminfo/z/ztx-1-a
OLD_FILES+=usr/share/terminfo/z/ztx11
OLD_DIRS+=usr/share/terminfo/z/
OLD_DIRS+=usr/share/terminfo/
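# The OLD_FILES/OLD_DIRS entries above and below are consumed by the
# check-old and delete-old targets in Makefile.inc1; a minimal usage
# sketch, assuming the source tree lives in /usr/src:
#	cd /usr/src
#	make check-old		# report which listed files/dirs still exist
#	make delete-old		# remove OLD_FILES, then empty OLD_DIRS
#	make -DBATCH_DELETE_OLD_FILES delete-old	# same, no per-file prompt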
# 20210316: remove obsolete NFS headers
OLD_FILES+=usr/include/nfs/nfs_common.h
OLD_FILES+=usr/include/nfsclient/nfsm_subs.h
OLD_FILES+=usr/include/nfsclient/nlminfo.h
OLD_FILES+=usr/include/nfsserver/nfs_fha_old.h
OLD_FILES+=usr/include/nfsserver/nfsm_subs.h
OLD_FILES+=usr/include/nfsserver/nfsrvcache.h
# 20210315: Remove kernel-only crypto headers from /usr/include
OLD_FILES+=usr/include/crypto/_cryptodev.h
OLD_FILES+=usr/include/crypto/cbc_mac.h
OLD_FILES+=usr/include/crypto/deflate.h
OLD_FILES+=usr/include/crypto/gfmult.h
OLD_FILES+=usr/include/crypto/gmac.h
OLD_FILES+=usr/include/crypto/rijndael.h
OLD_FILES+=usr/include/crypto/rmd160.h
OLD_FILES+=usr/include/crypto/xform.h
OLD_FILES+=usr/include/crypto/xform_auth.h
OLD_FILES+=usr/include/crypto/xform_comp.h
OLD_FILES+=usr/include/crypto/xform_enc.h
# 20210305: removed Poly1305_* symbols
OLD_FILES+=usr/include/crypto/xform_poly1305.h
# 20210302: fmtree removed
OLD_FILES+=usr/sbin/fmtree
OLD_FILES+=usr/share/man/man8/fmtree.8.gz
# 20210201: bump shared libraries which link against ncurses
OLD_LIBS+=lib/libedit.so.7
OLD_LIBS+=usr/lib/libdialog.so.8
OLD_LIBS+=usr/lib/libdpv.so.1
OLD_LIBS+=usr/lib/libform.so.5
OLD_LIBS+=usr/lib/libformw.so.5
OLD_LIBS+=usr/lib/libmenu.so.5
OLD_LIBS+=usr/lib/libmenuw.so.5
OLD_LIBS+=usr/lib/libpanel.so.5
OLD_LIBS+=usr/lib/libpanelw.so.5
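# Note: OLD_LIBS, unlike OLD_FILES, marks superseded shared-library
# versions.  They are removed by the separate delete-old-libs target, so
# third-party binaries still linked against the old versions keep working
# until rebuilt; the usual ordering (sketch):
#	make installworld
#	make delete-old
#	(reinstall or rebuild ports/packages)
#	make delete-old-libs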
# 20210125: ndis driver support removed
OLD_FILES+=usr/sbin/ndis_events
OLD_FILES+=usr/sbin/ndiscvt
OLD_FILES+=usr/sbin/ndisgen
OLD_FILES+=usr/share/man/man4/ndis.4.gz
OLD_FILES+=usr/share/man/man4/if_ndis.4.gz
OLD_FILES+=usr/share/man/man8/ndis_events.8.gz
OLD_FILES+=usr/share/man/man8/ndiscvt.8.gz
OLD_FILES+=usr/share/man/man8/ndisgen.8.gz
OLD_FILES+=usr/share/misc/windrv_stub.c
# 20210116: openpromio(4) removed
OLD_FILES+=usr/include/dev/ofw/openpromio.h
# 20210116: if_wl_wavelan.h removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/if_wl_wavelan.h
.endif
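# Machine-dependent removals are guarded with make conditionals, as above,
# so delete-old only considers files that were ever installed for the
# TARGET_ARCH being cleaned; unguarded entries apply to all architectures.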
# 20210108: retire cmx, ng_bt3c, wi drivers
OLD_FILES+=usr/include/dev/wi/if_wireg.h
OLD_FILES+=usr/include/dev/wi/if_wavelan_ieee.h
OLD_FILES+=usr/include/dev/wi/if_wivar.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bt3c.h
OLD_FILES+=usr/sbin/bt3cfw
OLD_FILES+=usr/share/man/man4/cmx.4.gz
OLD_FILES+=usr/share/man/man4/if_wi.4.gz
OLD_FILES+=usr/share/man/man4/ng_bt3c.4.gz
OLD_FILES+=usr/share/man/man4/wi.4.gz
OLD_FILES+=usr/share/man/man8/bt3cfw.8.gz
# 20210107: retire a.out support
OLD_DIRS+=usr/lib/aout
OLD_DIRS+=usr/lib/compat/aout
# 20210107: remove cmx(4)
OLD_FILES+=usr/share/man/man4/cmx.4.gz
# 20210105: remove non-widechar version of ncurses
OLD_LIBS+=lib/libncurses.so.9
# 20210103: new clang import which bumps version from 11.0.0 to 11.0.1
OLD_FILES+=usr/lib/clang/11.0.0/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/11.0.0/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/11.0.0/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/11.0.0/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/11.0.0/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/11.0.0/include/fuzzer
OLD_FILES+=usr/lib/clang/11.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h
OLD_FILES+=usr/lib/clang/11.0.0/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/11.0.0/include/openmp_wrappers/complex
OLD_FILES+=usr/lib/clang/11.0.0/include/openmp_wrappers/complex.h
OLD_FILES+=usr/lib/clang/11.0.0/include/openmp_wrappers/math.h
OLD_FILES+=usr/lib/clang/11.0.0/include/openmp_wrappers/new
OLD_DIRS+=usr/lib/clang/11.0.0/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/11.0.0/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/11.0.0/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/11.0.0/include/profile
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/11.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/11.0.0/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/11.0.0/include/xray
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_math.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_hip_libdevice_declares.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_hip_math.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__clang_hip_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/11.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/11.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/amxintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_bf16.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_cde.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_mve.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/11.0.0/include/arm_sve.h
OLD_FILES+=usr/lib/clang/11.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/cet.h
OLD_FILES+=usr/lib/clang/11.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/11.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/11.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/11.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/11.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/11.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/11.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/11.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/serializeintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/tsxldtrkintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/11.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/wasm_simd128.h
OLD_FILES+=usr/lib/clang/11.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/11.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/11.0.0/include
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-powerpc64le.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/11.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/11.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/11.0.0/lib
OLD_DIRS+=usr/lib/clang/11.0.0
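#
# Note: the OLD_FILES, OLD_LIBS and OLD_DIRS lists accumulated throughout
# this file are consumed by the top-level check-old and delete-old family
# of targets (see Makefile.inc1); roughly:
#
#   make check-old        # report stale files/libs/dirs still installed
#   make delete-old       # remove stale files and (empty) directories,
#                         # prompting per file unless BATCH_DELETE_OLD_FILES
#                         # is defined
#   make delete-old-libs  # remove stale shared libraries
#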
# 20201226: Siemens SAB 82532 support removed
OLD_FILES+=usr/include/dev/ic/sab82532.h
# 20201225: PMC for Xscale removed
OLD_FILES+=usr/include/dev/hwpmc/hwpmc_xscale.h
OLD_FILES+=usr/share/man/man3/pmc.xscale.3.gz
# 20201225: libregex removed
OLD_FILES+=usr/include/gnu/posix/regex.h
OLD_DIRS+=usr/include/gnu/posix
OLD_FILES+=usr/include/gnu/regex.h
OLD_DIRS+=usr/include/gnu
OLD_FILES+=usr/include/gnuregex.h
OLD_FILES+=usr/lib/libgnuregex.a
OLD_FILES+=usr/lib/libgnuregex.so
OLD_LIBS+=usr/lib/libgnuregex.so.5
OLD_FILES+=usr/lib/libgnuregex_p.a
# 20201225: gnugrep removed (BSD grep is now installed simply as grep)
OLD_FILES+=usr/bin/bsdgrep
OLD_FILES+=usr/bin/gnugrep
OLD_FILES+=usr/share/man/man1/bsdgrep.1.gz
OLD_FILES+=usr/share/man/man1/gnugrep.1.gz
# 20201224: mk48txx(4) removed
OLD_FILES+=usr/share/man/man4/mk48txx.4.gz
# 20201215: in-tree gdb removed
OLD_FILES+=usr/libexec/gdb
OLD_FILES+=usr/libexec/kgdb
# 20201211: hme(4) removed
OLD_FILES+=usr/share/man/man4/hme.4.gz
OLD_FILES+=usr/share/man/man4/if_hme.4.gz
# 20201129: RADIX_MPATH removed
OLD_FILES+=usr/include/net/radix_mpath.h
# 20201124: ping6(8) was merged into ping(8)
OLD_FILES+=usr/share/man/man8/ping6.8.gz
OLD_FILES+=usr/tests/sbin/ping6/Kyuafile
OLD_FILES+=usr/tests/sbin/ping6/ping6_c1_s8_t1.out
OLD_FILES+=usr/tests/sbin/ping6/ping6_test
OLD_DIRS+=usr/tests/sbin/ping6
# 20201025: Remove calendar(1) data files
OLD_FILES+=usr/share/calendar/calendar.all
OLD_FILES+=usr/share/calendar/calendar.australia
OLD_FILES+=usr/share/calendar/calendar.birthday
OLD_FILES+=usr/share/calendar/calendar.brazilian
OLD_FILES+=usr/share/calendar/calendar.christian
OLD_FILES+=usr/share/calendar/calendar.computer
OLD_FILES+=usr/share/calendar/calendar.croatian
OLD_FILES+=usr/share/calendar/calendar.dutch
OLD_FILES+=usr/share/calendar/calendar.french
OLD_FILES+=usr/share/calendar/calendar.german
OLD_FILES+=usr/share/calendar/calendar.history
OLD_FILES+=usr/share/calendar/calendar.holiday
OLD_FILES+=usr/share/calendar/calendar.hungarian
OLD_FILES+=usr/share/calendar/calendar.judaic
OLD_FILES+=usr/share/calendar/calendar.lotr
OLD_FILES+=usr/share/calendar/calendar.music
OLD_FILES+=usr/share/calendar/calendar.newzealand
OLD_FILES+=usr/share/calendar/calendar.russian
OLD_FILES+=usr/share/calendar/calendar.southafrica
OLD_FILES+=usr/share/calendar/calendar.ukrainian
OLD_FILES+=usr/share/calendar/calendar.usholiday
OLD_FILES+=usr/share/calendar/calendar.world
OLD_FILES+=usr/share/calendar/de_AT.ISO_8859-15/calendar.feiertag
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.feiertag
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.geschichte
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.kirche
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.literatur
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.musik
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.wissenschaft
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.fetes
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.french
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.jferies
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.proverbes
OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.all
OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.praznici
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.all
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.nevnapok
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.unnepek
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.commemorative
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.holidays
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.mcommemorative
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.all
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.commemorative
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.holidays
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.mcommemorative
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.all
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.common
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.holiday
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.military
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.orthodox
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.pagan
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.all
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.common
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.holiday
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.military
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.orthodox
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.pagan
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.all
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.holiday
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.misc
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.orthodox
OLD_DIRS+=usr/share/calendar/de_AT.ISO_8859-15
OLD_DIRS+=usr/share/calendar/de_DE.ISO8859-1
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-15
OLD_DIRS+=usr/share/calendar/fr_FR.ISO8859-1
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-15
OLD_DIRS+=usr/share/calendar/hr_HR.ISO8859-2
OLD_DIRS+=usr/share/calendar/hu_HU.ISO8859-2
OLD_DIRS+=usr/share/calendar/pt_BR.ISO8859-1
OLD_DIRS+=usr/share/calendar/pt_BR.UTF-8
OLD_DIRS+=usr/share/calendar/ru_RU.KOI8-R
OLD_DIRS+=usr/share/calendar/ru_RU.UTF-8
OLD_DIRS+=usr/share/calendar/uk_UA.KOI8-U
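# Note: delete-old only removes an OLD_DIRS entry once it is empty, so child
# directories must be listed before their parents (as with the per-locale
# calendar directories above and the usr/lib/clang trees in this file).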
# 20201004: logo files renamed to type-agnostic gfx-*.lua
OLD_FILES+=boot/lua/logo-beastie.lua
OLD_FILES+=boot/lua/logo-beastiebw.lua
OLD_FILES+=boot/lua/logo-fbsdbw.lua
OLD_FILES+=boot/lua/logo-orb.lua
OLD_FILES+=boot/lua/logo-orbbw.lua
# 20200828: net/route/shared.h moved to net/route/route_var.h
OLD_FILES+=usr/include/net/route/shared.h
# 20200825: merged OpenZFS support
OLD_LIBS+=lib/libzfs.so.3
OLD_FILES+=usr/share/man/man1/zstreamdump.1.gz
#OLD_FILES+=usr/share/man/man7/zpool-features.7.gz
# 20200923: memfd_test moved to /usr/tests/sys/posixshm
OLD_FILES+=usr/tests/sys/kern/memfd_test
# 20200910: remove vm_map_create(9) man page to match the code
OLD_FILES+=usr/share/man/man9/vm_map_create.9.gz
# 20200820: ufm driver removed
OLD_FILES+=usr/share/man/man4/ufm.4.gz
# 20200816: new clang import which bumps version from 10.0.1 to 11.0.0
OLD_FILES+=usr/lib/clang/10.0.1/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/10.0.1/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/10.0.1/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/10.0.1/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/10.0.1/include/fuzzer
OLD_FILES+=usr/lib/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math.h
OLD_FILES+=usr/lib/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
OLD_FILES+=usr/lib/clang/10.0.1/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/10.0.1/include/openmp_wrappers/math.h
OLD_DIRS+=usr/lib/clang/10.0.1/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/10.0.1/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/10.0.1/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/10.0.1/include/profile
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/10.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/10.0.1/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/10.0.1/include/xray
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/10.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/10.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/10.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/10.0.1/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/10.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/10.0.1/include/arm_mve.h
OLD_FILES+=usr/lib/clang/10.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/10.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/10.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/10.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/10.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/10.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/10.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/10.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/10.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/10.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/10.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/10.0.1/include
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.1/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/10.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/10.0.1/lib
OLD_DIRS+=usr/lib/clang/10.0.1
# 20200803: remove free_domain(9) and uma_zfree_domain(9)
OLD_FILES+=usr/share/man/man9/free_domain.9.gz
OLD_FILES+=usr/share/man/man9/uma_zfree_domain.9.gz
# 20200729: remove long-expired serial drivers
OLD_FILES+=usr/share/man/man4/cy.4.gz
OLD_FILES+=usr/share/man/man4/rc.4.gz
OLD_FILES+=usr/share/man/man4/rp.4.gz
# 20200721: sys/iommu.h moved to dev/iommu/
OLD_FILES+=usr/include/sys/iommu.h
# 20200715: devstat(9) man page reworked
OLD_FILES+=usr/share/man/man9/devstat_add_entry.9.gz
# 20200714: update byacc to 20200330
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_demo.y
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_destroy1.y
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_destroy2.y
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_destroy3.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit1.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit2.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit3.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit4.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit5.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit0.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit1.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit2.y
# 20200706: update of sglist(9), r360574
OLD_FILES+=usr/share/man/man9/sglist_append_ext_pgs.9.gz
OLD_FILES+=usr/share/man/man9/sglist_append_mb_ext_pgs.9.gz
OLD_FILES+=usr/share/man/man9/sglist_count_ext_pgs.9.gz
OLD_FILES+=usr/share/man/man9/sglist_count_mb_ext_pgs.9.gz
# 20200617: update opencsd to 0.14.2
OLD_FILES+=usr/include/opencsd/etmv4/trc_pkt_elem_etmv4d.h
# 20200606: retire binutils build infrastructure
OLD_FILES+=usr/bin/as
OLD_FILES+=usr/bin/ld.bfd
OLD_FILES+=usr/share/man/man1/as.1.gz
OLD_FILES+=usr/share/man/man1/ld.bfd.1.gz
OLD_FILES+=usr/share/man/man7/as.7.gz
OLD_FILES+=usr/share/man/man7/ld.7.gz
OLD_FILES+=usr/share/man/man7/ldint.7.gz
OLD_FILES+=usr/share/man/man7/binutils.7.gz
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.x
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xd
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xn
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xr
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xs
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xu
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.x
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xd
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xr
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xs
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xu
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xw
# 20200601: OpenSSL 32-bit compat engines moved to /usr/lib32/engines
OLD_LIBS+=usr/lib32/capi.so
OLD_LIBS+=usr/lib32/padlock.so
# 20200528: libevent renamed to libevent1
OLD_FILES+=usr/include/private/event/event.h
OLD_DIRS+=usr/include/private/event
OLD_FILES+=usr/lib/libprivateevent.a
OLD_FILES+=usr/lib/libprivateevent.so
OLD_LIBS+=usr/lib/libprivateevent.so.1
OLD_FILES+=usr/lib/libprivateevent_p.a
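# Note: the libevent entries above show the usual split: unversioned
# development artifacts (.a archives, .so symlinks, headers) go in OLD_FILES,
# while the versioned shared object (libprivateevent.so.1) goes in OLD_LIBS,
# so binaries still linked against it keep working until delete-old-libs is
# run deliberately.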
# 20200523: new clang import which bumps version from 10.0.0 to 10.0.1
OLD_FILES+=usr/lib/clang/10.0.0/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/10.0.0/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/10.0.0/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/10.0.0/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/10.0.0/include/fuzzer/FuzzedDataProvider.h
OLD_DIRS+=usr/lib/clang/10.0.0/include/fuzzer
OLD_FILES+=usr/lib/clang/10.0.0/include/openmp_wrappers/__clang_openmp_math.h
OLD_FILES+=usr/lib/clang/10.0.0/include/openmp_wrappers/__clang_openmp_math_declares.h
OLD_FILES+=usr/lib/clang/10.0.0/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/10.0.0/include/openmp_wrappers/math.h
OLD_DIRS+=usr/lib/clang/10.0.0/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/pmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/smmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/tmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/10.0.0/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/10.0.0/include/profile/InstrProfData.inc
OLD_DIRS+=usr/lib/clang/10.0.0/include/profile
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sanitizer/ubsan_interface.h
OLD_DIRS+=usr/lib/clang/10.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/10.0.0/include/xray/xray_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xray/xray_log_interface.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xray/xray_records.h
OLD_DIRS+=usr/lib/clang/10.0.0/include/xray
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/10.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/10.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/10.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/10.0.0/include/arm_cmse.h
OLD_FILES+=usr/lib/clang/10.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/10.0.0/include/arm_mve.h
OLD_FILES+=usr/lib/clang/10.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/10.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/10.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/10.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/10.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/10.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/10.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/10.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/10.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/10.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/10.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/10.0.0/include
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/10.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/10.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/10.0.0/lib
OLD_DIRS+=usr/lib/clang/10.0.0
# 20200520: xform_userland.h removed
OLD_FILES+=usr/include/crypto/xform_userland.h
# 20200515: libalias cuseeme protocol support retired
OLD_LIBS+=lib/libalias_cuseeme.so
OLD_FILES+=usr/lib/libalias_cuseeme.a
OLD_FILES+=usr/lib/libalias_cuseeme_p.a
# 20200511: Remove deprecated crypto algorithms
OLD_FILES+=usr/include/crypto/cast.h
OLD_FILES+=usr/include/crypto/castsb.h
OLD_FILES+=usr/include/crypto/skipjack.h
# 20200511: Remove ubsec(4)
OLD_FILES+=usr/share/man/man4/ubsec.4.gz
# 20200428: route_var.h moved to net/route
OLD_FILES+=usr/include/net/route_var.h
# 20200418: Make libauditd private
OLD_FILES+=usr/lib/libauditd.a
OLD_FILES+=usr/lib/libauditd.so
OLD_LIBS+=usr/lib/libauditd.so.5
OLD_FILES+=usr/lib/libauditd_p.a
# 20200418: Remove bogus man links
OLD_FILES+=usr/share/man/man3/getauusernam_R.3.gz
OLD_FILES+=usr/share/man/man3/getauclassnam_3.3.gz
# 20200414: NFS server file handle affinity code reorganized
OLD_FILES+=usr/include/nfs/nfs_fha.h
# 20200401: Remove procfs-based process debugging
OLD_FILES+=usr/include/sys/pioctl.h
# 20200330: GDB_LIBEXEC option retired (always true)
OLD_FILES+=usr/bin/gdb
OLD_FILES+=usr/bin/gdbserver
OLD_FILES+=usr/bin/kgdb
OLD_FILES+=usr/share/man/man1/gdb.1.gz
OLD_FILES+=usr/share/man/man1/gdbserver.1.gz
OLD_FILES+=usr/share/man/man1/kgdb.1.gz
# 20200327: OCF refactoring
OLD_FILES+=usr/include/crypto/cryptosoft.h
OLD_FILES+=usr/share/man/man9/crypto_find_driver.9.gz
OLD_FILES+=usr/share/man/man9/crypto_register.9.gz
OLD_FILES+=usr/share/man/man9/crypto_unregister.9.gz
# 20200326: compat libs for libl are no longer built
OLD_FILES+=usr/lib32/libfl.a
OLD_FILES+=usr/lib32/libl.a
OLD_FILES+=usr/lib32/libln.a
# 20200323: INTERNALLIB libraries no longer install headers
OLD_FILES+=usr/include/libelftc.h
OLD_FILES+=usr/include/libifconfig.h
OLD_FILES+=usr/include/libpmcstat.h
# 20200320: cx and ctau drivers retired
OLD_FILES+=usr/share/man/man4/ctau.4.gz
OLD_FILES+=usr/share/man/man4/cx.4.gz
# 20200318: host.conf removed (deprecated long ago)
OLD_FILES+=etc/host.conf
OLD_FILES+=etc/rc.d/nsswitch
# 20200310: new clang import which bumps version from 9.0.1 to 10.0.0
OLD_FILES+=usr/lib/clang/9.0.1/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/9.0.1/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/9.0.1/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/9.0.1/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/__clang_openmp_math.h
OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/math.h
OLD_DIRS+=usr/lib/clang/9.0.1/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/9.0.1/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/9.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/9.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/9.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/9.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/9.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/9.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/9.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/9.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/9.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/9.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/9.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/9.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/9.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/9.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/9.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/9.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/9.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/9.0.1/include
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/9.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/9.0.1/lib
OLD_DIRS+=usr/lib/clang/9.0.1
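# Editorial note (assuming the standard FreeBSD delete-old machinery defined
# elsewhere in this file): the OLD_FILES/OLD_LIBS/OLD_DIRS entries accumulated
# in this log are consumed by the check-old and delete-old targets after an
# installworld, e.g.:
#   make -C /usr/src check-old        # report stale files, libs and dirs
#   make -C /usr/src delete-old       # remove OLD_FILES, then empty OLD_DIRS
#   make -C /usr/src delete-old-libs  # remove OLD_LIBS once nothing links them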
# 20200309: amd(8) retired
OLD_FILES+=etc/amd.map
OLD_FILES+=etc/newsyslog.conf.d/amd.conf
OLD_FILES+=etc/rc.d/amd
OLD_FILES+=usr/bin/pawd
OLD_FILES+=usr/sbin/amd
OLD_FILES+=usr/sbin/amq
OLD_FILES+=usr/sbin/fixmount
OLD_FILES+=usr/sbin/fsinfo
OLD_FILES+=usr/sbin/hlfsd
OLD_FILES+=usr/sbin/mk-amd-map
OLD_FILES+=usr/sbin/wire-test
OLD_FILES+=usr/share/examples/etc/amd.map
OLD_FILES+=usr/share/man/man1/pawd.1.gz
OLD_FILES+=usr/share/man/man5/amd.conf.5.gz
OLD_FILES+=usr/share/man/man8/amd.8.gz
OLD_FILES+=usr/share/man/man8/amq.8.gz
OLD_FILES+=usr/share/man/man8/fixmount.8.gz
OLD_FILES+=usr/share/man/man8/fsinfo.8.gz
OLD_FILES+=usr/share/man/man8/hlfsd.8.gz
OLD_FILES+=usr/share/man/man8/mk-amd-map.8.gz
OLD_FILES+=usr/share/man/man8/wire-test.8.gz
# 20200301: bktr removed
OLD_DIRS+=usr/include/dev/bktr
OLD_FILES+=usr/include/dev/bktr/ioctl_bktr.h
OLD_FILES+=usr/include/dev/bktr/ioctl_bt848.h
OLD_FILES+=usr/include/dev/bktr/ioctl_meteor.h
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/ioctl_bktr.h
OLD_FILES+=usr/include/machine/ioctl_meteor.h
.endif
OLD_FILES+=usr/share/man/man4/bktr.4.gz
OLD_FILES+=usr/share/man/man4/brooktree.4.gz
# 20200229: GCC 4.2.1 removed
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/bin/gcc
OLD_FILES+=usr/share/man/man1/g++.1.gz
OLD_FILES+=usr/share/man/man1/gcc.1.gz
OLD_FILES+=usr/bin/gcpp
OLD_FILES+=usr/bin/gperf
OLD_FILES+=usr/include/c++/4.2/algorithm
OLD_FILES+=usr/include/c++/4.2/backward/algo.h
OLD_FILES+=usr/include/c++/4.2/backward/algobase.h
OLD_FILES+=usr/include/c++/4.2/backward/alloc.h
OLD_FILES+=usr/include/c++/4.2/backward/backward_warning.h
OLD_FILES+=usr/include/c++/4.2/backward/bvector.h
OLD_FILES+=usr/include/c++/4.2/backward/complex.h
OLD_FILES+=usr/include/c++/4.2/backward/defalloc.h
OLD_FILES+=usr/include/c++/4.2/backward/deque.h
OLD_FILES+=usr/include/c++/4.2/backward/fstream.h
OLD_FILES+=usr/include/c++/4.2/backward/function.h
OLD_FILES+=usr/include/c++/4.2/backward/hash_map.h
OLD_FILES+=usr/include/c++/4.2/backward/hash_set.h
OLD_FILES+=usr/include/c++/4.2/backward/hashtable.h
OLD_FILES+=usr/include/c++/4.2/backward/heap.h
OLD_FILES+=usr/include/c++/4.2/backward/iomanip.h
OLD_FILES+=usr/include/c++/4.2/backward/iostream.h
OLD_FILES+=usr/include/c++/4.2/backward/istream.h
OLD_FILES+=usr/include/c++/4.2/backward/iterator.h
OLD_FILES+=usr/include/c++/4.2/backward/list.h
OLD_FILES+=usr/include/c++/4.2/backward/map.h
OLD_FILES+=usr/include/c++/4.2/backward/multimap.h
OLD_FILES+=usr/include/c++/4.2/backward/multiset.h
OLD_FILES+=usr/include/c++/4.2/backward/new.h
OLD_FILES+=usr/include/c++/4.2/backward/ostream.h
OLD_FILES+=usr/include/c++/4.2/backward/pair.h
OLD_FILES+=usr/include/c++/4.2/backward/queue.h
OLD_FILES+=usr/include/c++/4.2/backward/rope.h
OLD_FILES+=usr/include/c++/4.2/backward/set.h
OLD_FILES+=usr/include/c++/4.2/backward/slist.h
OLD_FILES+=usr/include/c++/4.2/backward/stack.h
OLD_FILES+=usr/include/c++/4.2/backward/stream.h
OLD_FILES+=usr/include/c++/4.2/backward/streambuf.h
OLD_FILES+=usr/include/c++/4.2/backward/strstream
OLD_FILES+=usr/include/c++/4.2/backward/tempbuf.h
OLD_FILES+=usr/include/c++/4.2/backward/tree.h
OLD_FILES+=usr/include/c++/4.2/backward/vector.h
OLD_FILES+=usr/include/c++/4.2/bits/allocator.h
OLD_FILES+=usr/include/c++/4.2/bits/atomic_word.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_file.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/4.2/bits/basic_string.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/4.2/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/4.2/bits/c++allocator.h
OLD_FILES+=usr/include/c++/4.2/bits/c++config.h
OLD_FILES+=usr/include/c++/4.2/bits/c++io.h
OLD_FILES+=usr/include/c++/4.2/bits/c++locale.h
OLD_FILES+=usr/include/c++/4.2/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/4.2/bits/char_traits.h
OLD_FILES+=usr/include/c++/4.2/bits/cmath.tcc
OLD_FILES+=usr/include/c++/4.2/bits/codecvt.h
OLD_FILES+=usr/include/c++/4.2/bits/compatibility.h
OLD_FILES+=usr/include/c++/4.2/bits/concept_check.h
OLD_FILES+=usr/include/c++/4.2/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/4.2/bits/cpu_defines.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_base.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/4.2/bits/cxxabi_tweaks.h
OLD_FILES+=usr/include/c++/4.2/bits/deque.tcc
OLD_FILES+=usr/include/c++/4.2/bits/fstream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/functexcept.h
OLD_FILES+=usr/include/c++/4.2/bits/gslice.h
OLD_FILES+=usr/include/c++/4.2/bits/gslice_array.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-default.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-single.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-tpf.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr.h
OLD_FILES+=usr/include/c++/4.2/bits/indirect_array.h
OLD_FILES+=usr/include/c++/4.2/bits/ios_base.h
OLD_FILES+=usr/include/c++/4.2/bits/istream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/list.tcc
OLD_FILES+=usr/include/c++/4.2/bits/locale_classes.h
OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.h
OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/4.2/bits/localefwd.h
OLD_FILES+=usr/include/c++/4.2/bits/mask_array.h
OLD_FILES+=usr/include/c++/4.2/bits/messages_members.h
OLD_FILES+=usr/include/c++/4.2/bits/os_defines.h
OLD_FILES+=usr/include/c++/4.2/bits/ostream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/ostream_insert.h
OLD_FILES+=usr/include/c++/4.2/bits/postypes.h
OLD_FILES+=usr/include/c++/4.2/bits/slice_array.h
OLD_FILES+=usr/include/c++/4.2/bits/sstream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/stl_algo.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_construct.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_deque.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_function.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_heap.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_list.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_map.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_pair.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_queue.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_relops.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_set.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_stack.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_tree.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_vector.h
OLD_FILES+=usr/include/c++/4.2/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/4.2/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/stringfwd.h
OLD_FILES+=usr/include/c++/4.2/bits/time_members.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_after.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/4.2/bits/valarray_before.h
OLD_FILES+=usr/include/c++/4.2/bits/vector.tcc
OLD_FILES+=usr/include/c++/4.2/bitset
OLD_FILES+=usr/include/c++/4.2/cassert
OLD_FILES+=usr/include/c++/4.2/cctype
OLD_FILES+=usr/include/c++/4.2/cerrno
OLD_FILES+=usr/include/c++/4.2/cfloat
OLD_FILES+=usr/include/c++/4.2/ciso646
OLD_FILES+=usr/include/c++/4.2/climits
OLD_FILES+=usr/include/c++/4.2/clocale
OLD_FILES+=usr/include/c++/4.2/cmath
OLD_FILES+=usr/include/c++/4.2/complex
OLD_FILES+=usr/include/c++/4.2/csetjmp
OLD_FILES+=usr/include/c++/4.2/csignal
OLD_FILES+=usr/include/c++/4.2/cstdarg
OLD_FILES+=usr/include/c++/4.2/cstddef
OLD_FILES+=usr/include/c++/4.2/cstdio
OLD_FILES+=usr/include/c++/4.2/cstdlib
OLD_FILES+=usr/include/c++/4.2/cstring
OLD_FILES+=usr/include/c++/4.2/ctime
OLD_FILES+=usr/include/c++/4.2/cwchar
OLD_FILES+=usr/include/c++/4.2/cwctype
OLD_FILES+=usr/include/c++/4.2/cxxabi.h
OLD_FILES+=usr/include/c++/4.2/debug/bitset
OLD_FILES+=usr/include/c++/4.2/debug/debug.h
OLD_FILES+=usr/include/c++/4.2/debug/deque
OLD_FILES+=usr/include/c++/4.2/debug/formatter.h
OLD_FILES+=usr/include/c++/4.2/debug/functions.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_map
OLD_FILES+=usr/include/c++/4.2/debug/hash_map.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_multimap.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_multiset.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_set
OLD_FILES+=usr/include/c++/4.2/debug/hash_set.h
OLD_FILES+=usr/include/c++/4.2/debug/list
OLD_FILES+=usr/include/c++/4.2/debug/macros.h
OLD_FILES+=usr/include/c++/4.2/debug/map
OLD_FILES+=usr/include/c++/4.2/debug/map.h
OLD_FILES+=usr/include/c++/4.2/debug/multimap.h
OLD_FILES+=usr/include/c++/4.2/debug/multiset.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_base.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.tcc
OLD_FILES+=usr/include/c++/4.2/debug/safe_sequence.h
OLD_FILES+=usr/include/c++/4.2/debug/set
OLD_FILES+=usr/include/c++/4.2/debug/set.h
OLD_FILES+=usr/include/c++/4.2/debug/string
OLD_FILES+=usr/include/c++/4.2/debug/vector
OLD_FILES+=usr/include/c++/4.2/deque
OLD_FILES+=usr/include/c++/4.2/exception
OLD_FILES+=usr/include/c++/4.2/exception_defines.h
OLD_FILES+=usr/include/c++/4.2/ext/algorithm
OLD_FILES+=usr/include/c++/4.2/ext/array_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/atomicity.h
OLD_FILES+=usr/include/c++/4.2/ext/bitmap_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/codecvt_specializations.h
OLD_FILES+=usr/include/c++/4.2/ext/concurrence.h
OLD_FILES+=usr/include/c++/4.2/ext/debug_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/functional
OLD_FILES+=usr/include/c++/4.2/ext/hash_fun.h
OLD_FILES+=usr/include/c++/4.2/ext/hash_map
OLD_FILES+=usr/include/c++/4.2/ext/hash_set
OLD_FILES+=usr/include/c++/4.2/ext/hashtable.h
OLD_FILES+=usr/include/c++/4.2/ext/iterator
OLD_FILES+=usr/include/c++/4.2/ext/malloc_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/memory
OLD_FILES+=usr/include/c++/4.2/ext/mt_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/new_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/numeric
OLD_FILES+=usr/include/c++/4.2/ext/numeric_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/assoc_container.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/basic_tree_policy_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/null_node_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_types.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/bin_search_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_key_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/point_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/r_erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/rotate_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/binary_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_cmp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_pred.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/resize_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/binomial_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/binomial_heap_base_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cc_ht_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cmp_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cond_key_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/entry_list_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/size_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cond_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/container_base_dispatch.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/eq_by_less.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/hash_eq_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/gp_ht_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/iterator_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mask_range_hashing_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mod_range_hashing_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/linear_probe_fn_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mask_based_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mod_based_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/probe_fn_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/quadratic_probe_fn_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_hash_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_hash_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/left_child_next_sibling_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/null_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/entry_metadata_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/lu_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/mtf_lu_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/sample_update_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/map_debug_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/cond_dtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/ov_tree_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/pairing_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/child_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/cond_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/const_child_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/head.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/insert_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/internal_node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/leaf.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_metadata_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/pat_trie_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/point_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/r_erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/rotate_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_join_branch_bag.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/synth_e_access_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/update_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/priority_queue_base_dispatch.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/rb_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc_binomial_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/cc_hash_max_collision_check_resize_trigger_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_exponential_size_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_size_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_prime_size_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_standard_resize_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_trigger.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_size_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/thin_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/node_metadata_selector.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/null_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/order_statistics_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/sample_tree_node_update.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_trace_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/node_metadata_selector.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/null_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/order_statistics_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/prefix_search_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_e_access_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_node_update.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/string_trie_e_access_traits_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/trie_policy_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/type_utils.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/types_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/exception.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/hash_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/list_update_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/priority_queue.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tag_and_trait.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tree_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/trie_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pod_char_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/pool_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/rb_tree
OLD_FILES+=usr/include/c++/4.2/ext/rc_string_base.h
OLD_FILES+=usr/include/c++/4.2/ext/rope
OLD_FILES+=usr/include/c++/4.2/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/4.2/ext/slist
OLD_FILES+=usr/include/c++/4.2/ext/sso_string_base.h
OLD_FILES+=usr/include/c++/4.2/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/4.2/ext/stdio_sync_filebuf.h
OLD_FILES+=usr/include/c++/4.2/ext/throw_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/type_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/typelist.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring.tcc
OLD_FILES+=usr/include/c++/4.2/ext/vstring_fwd.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring_util.h
OLD_FILES+=usr/include/c++/4.2/fstream
OLD_FILES+=usr/include/c++/4.2/functional
OLD_FILES+=usr/include/c++/4.2/iomanip
OLD_FILES+=usr/include/c++/4.2/ios
OLD_FILES+=usr/include/c++/4.2/iosfwd
OLD_FILES+=usr/include/c++/4.2/iostream
OLD_FILES+=usr/include/c++/4.2/istream
OLD_FILES+=usr/include/c++/4.2/iterator
OLD_FILES+=usr/include/c++/4.2/limits
OLD_FILES+=usr/include/c++/4.2/list
OLD_FILES+=usr/include/c++/4.2/locale
OLD_FILES+=usr/include/c++/4.2/map
OLD_FILES+=usr/include/c++/4.2/memory
OLD_FILES+=usr/include/c++/4.2/new
OLD_FILES+=usr/include/c++/4.2/numeric
OLD_FILES+=usr/include/c++/4.2/ostream
OLD_FILES+=usr/include/c++/4.2/queue
OLD_FILES+=usr/include/c++/4.2/set
OLD_FILES+=usr/include/c++/4.2/sstream
OLD_FILES+=usr/include/c++/4.2/stack
OLD_FILES+=usr/include/c++/4.2/stdexcept
OLD_FILES+=usr/include/c++/4.2/streambuf
OLD_FILES+=usr/include/c++/4.2/string
OLD_FILES+=usr/include/c++/4.2/tr1/array
OLD_FILES+=usr/include/c++/4.2/tr1/bind_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/bind_repeat.h
OLD_FILES+=usr/include/c++/4.2/tr1/boost_shared_ptr.h
OLD_FILES+=usr/include/c++/4.2/tr1/cctype
OLD_FILES+=usr/include/c++/4.2/tr1/cfenv
OLD_FILES+=usr/include/c++/4.2/tr1/cfloat
OLD_FILES+=usr/include/c++/4.2/tr1/cinttypes
OLD_FILES+=usr/include/c++/4.2/tr1/climits
OLD_FILES+=usr/include/c++/4.2/tr1/cmath
OLD_FILES+=usr/include/c++/4.2/tr1/common.h
OLD_FILES+=usr/include/c++/4.2/tr1/complex
OLD_FILES+=usr/include/c++/4.2/tr1/cstdarg
OLD_FILES+=usr/include/c++/4.2/tr1/cstdbool
OLD_FILES+=usr/include/c++/4.2/tr1/cstdint
OLD_FILES+=usr/include/c++/4.2/tr1/cstdio
OLD_FILES+=usr/include/c++/4.2/tr1/cstdlib
OLD_FILES+=usr/include/c++/4.2/tr1/ctgmath
OLD_FILES+=usr/include/c++/4.2/tr1/ctime
OLD_FILES+=usr/include/c++/4.2/tr1/ctype.h
OLD_FILES+=usr/include/c++/4.2/tr1/cwchar
OLD_FILES+=usr/include/c++/4.2/tr1/cwctype
OLD_FILES+=usr/include/c++/4.2/tr1/fenv.h
OLD_FILES+=usr/include/c++/4.2/tr1/float.h
OLD_FILES+=usr/include/c++/4.2/tr1/functional
OLD_FILES+=usr/include/c++/4.2/tr1/functional_hash.h
OLD_FILES+=usr/include/c++/4.2/tr1/functional_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/hashtable
OLD_FILES+=usr/include/c++/4.2/tr1/hashtable_policy.h
OLD_FILES+=usr/include/c++/4.2/tr1/inttypes.h
OLD_FILES+=usr/include/c++/4.2/tr1/limits.h
OLD_FILES+=usr/include/c++/4.2/tr1/math.h
OLD_FILES+=usr/include/c++/4.2/tr1/memory
OLD_FILES+=usr/include/c++/4.2/tr1/mu_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/random
OLD_FILES+=usr/include/c++/4.2/tr1/random.tcc
OLD_FILES+=usr/include/c++/4.2/tr1/ref_fwd.h
OLD_FILES+=usr/include/c++/4.2/tr1/ref_wrap_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/repeat.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdarg.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdbool.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdint.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdio.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdlib.h
OLD_FILES+=usr/include/c++/4.2/tr1/tgmath.h
OLD_FILES+=usr/include/c++/4.2/tr1/tuple
OLD_FILES+=usr/include/c++/4.2/tr1/tuple_defs.h
OLD_FILES+=usr/include/c++/4.2/tr1/tuple_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/type_traits
OLD_FILES+=usr/include/c++/4.2/tr1/type_traits_fwd.h
OLD_FILES+=usr/include/c++/4.2/tr1/unordered_map
OLD_FILES+=usr/include/c++/4.2/tr1/unordered_set
OLD_FILES+=usr/include/c++/4.2/tr1/utility
OLD_FILES+=usr/include/c++/4.2/tr1/wchar.h
OLD_FILES+=usr/include/c++/4.2/tr1/wctype.h
OLD_FILES+=usr/include/c++/4.2/typeinfo
OLD_FILES+=usr/include/c++/4.2/utility
OLD_FILES+=usr/include/c++/4.2/valarray
OLD_FILES+=usr/include/c++/4.2/vector
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/gcc/4.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/gcc/4.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/gcc/4.2/ammintrin.h
OLD_FILES+=usr/include/gcc/4.2/emmintrin.h
OLD_FILES+=usr/include/gcc/4.2/mm3dnow.h
OLD_FILES+=usr/include/gcc/4.2/mm_malloc.h
OLD_FILES+=usr/include/gcc/4.2/mmintrin.h
OLD_FILES+=usr/include/gcc/4.2/pmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/tmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/wmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/xmmintrin.h
.elif ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/gcc/4.2/mmintrin.h
.elif ${TARGET_ARCH} == "powerpc" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/include/gcc/4.2/altivec.h
OLD_FILES+=usr/include/gcc/4.2/ppc-asm.h
OLD_FILES+=usr/include/gcc/4.2/spe.h
.endif
OLD_FILES+=usr/lib/libgcov.a
OLD_FILES+=usr/lib/libgomp.a
OLD_FILES+=usr/lib/libstdc++.a
OLD_FILES+=usr/lib/libstdc++.so
OLD_LIBS+=usr/lib/libstdc++.so.6
OLD_FILES+=usr/lib/libstdc++_p.a
OLD_FILES+=usr/lib/libsupc++.a
OLD_FILES+=usr/lib/libsupc++.so
OLD_LIBS+=usr/lib/libsupc++.so.1
OLD_FILES+=usr/lib/libsupc++_p.a
OLD_LIBS+=usr/lib/libgomp.so.1
OLD_FILES+=usr/lib/libgomp_p.a
OLD_FILES+=usr/libexec/cc1
OLD_FILES+=usr/libexec/cc1plus
OLD_FILES+=usr/share/man/man1/gcpp.1.gz
OLD_FILES+=usr/share/man/man1/gperf.1.gz
OLD_FILES+=usr/share/man/man7/gperf.7.gz
OLD_DIRS+=usr/include/c++/4.2/tr1
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds/detail
OLD_DIRS+=usr/include/c++/4.2/ext/pb_ds
OLD_DIRS+=usr/include/c++/4.2/ext
OLD_DIRS+=usr/include/c++/4.2/debug
OLD_DIRS+=usr/include/c++/4.2/bits
OLD_DIRS+=usr/include/c++/4.2/backward
OLD_DIRS+=usr/include/c++/4.2
OLD_DIRS+=usr/include/gcc/4.2
OLD_DIRS+=usr/include/gcc
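# Editorial note on the split above (standard OLD_FILES/OLD_LIBS convention,
# noted here for illustration): versioned shared objects such as
# libstdc++.so.6 and libsupc++.so.1 go into OLD_LIBS rather than OLD_FILES so
# that "make delete-old-libs" can remove them separately, after third-party
# consumers have been rebuilt; headers, static archives and unversioned .so
# links are removed immediately via OLD_FILES.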
# 20200220: ncurses upgraded; shared library version bumped to 9
OLD_LIBS+=lib/libncurses.so.8
OLD_LIBS+=lib/libncursesw.so.8
# 20200206: elf2aout removed
OLD_FILES+=usr/bin/elf2aout
OLD_FILES+=usr/share/man/man1/elf2aout.1.gz
# 20200204: simple_httpd removed
OLD_FILES+=usr/sbin/simple_httpd
# 20200127: vpo removed
OLD_FILES+=usr/include/dev/ppbus/vpoio.h
OLD_FILES+=usr/share/man/man4/imm.4.gz
OLD_FILES+=usr/share/man/man4/vpo.4.gz
# 20200104: gcc libssp removed
OLD_FILES+=usr/include/ssp/ssp.h
OLD_FILES+=usr/include/ssp/stdio.h
OLD_FILES+=usr/include/ssp/string.h
OLD_FILES+=usr/include/ssp/unistd.h
OLD_DIRS+=usr/include/ssp
OLD_FILES+=usr/lib/libssp.a
# 20191229: GEOM_SCHED class and gsched tool removed
OLD_LIBS+=lib/geom/geom_sched.so
OLD_FILES+=sbin/gsched
OLD_FILES+=usr/share/man/man8/gsched.8.gz
# 20191222: new clang import, bumping the version from 9.0.0 to 9.0.1
OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/algorithm
OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/complex
OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/new
OLD_DIRS+=usr/lib/clang/9.0.0/include/cuda_wrappers
OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/__clang_openmp_math.h
OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/__clang_openmp_math_declares.h
OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/cmath
OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/math.h
OLD_DIRS+=usr/lib/clang/9.0.0/include/openmp_wrappers
OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/emmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/mm_malloc.h
OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/mmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/xmmintrin.h
OLD_DIRS+=usr/lib/clang/9.0.0/include/ppc_wrappers
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/9.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/9.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/9.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/9.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/9.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/9.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/9.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/9.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bf16intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbf16intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvp2intersectintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vp2intersectintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/9.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/enqcmdintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/9.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/9.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/9.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/9.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/opencl-c-base.h
OLD_FILES+=usr/lib/clang/9.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/9.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/9.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/9.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/9.0.0/include
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-armhf.so
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-powerpc.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-powerpc64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
OLD_DIRS+=usr/lib/clang/9.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/9.0.0/lib
OLD_DIRS+=usr/lib/clang/9.0.0
# 20191221: Update libpcap from 1.9.0 to 1.9.1
OLD_FILES+=usr/share/man/man3/pcap_set_immediate_mode.3.gz
OLD_FILES+=usr/share/man/man3/pcap_set_protocol.3.gz
# 20191214: Removal of sranddev(3)
OLD_FILES+=usr/share/man/man3/sranddev.3.gz
# 20191213: Renamed (BIT|CPU)_NAND to (BIT|CPU)_ANDNOT
OLD_FILES+=usr/share/man/man9/BIT_NAND.9.gz
OLD_FILES+=usr/share/man/man9/CPU_NAND.9.gz
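# A minimal migration sketch for the rename above, assuming the cpuset
# wrappers kept the two-argument form (d &= ~s) across the rename:
#   cpuset_t mask, other;
#   CPU_FILL(&mask);
#   CPU_ZERO(&other);
#   CPU_ANDNOT(&mask, &other);  /* was CPU_NAND(&mask, &other) */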
# 20191213: remove timeout(9)
OLD_FILES+=usr/share/man/man9/callout_handle_init.9.gz
OLD_FILES+=usr/share/man/man9/timeout.9.gz
OLD_FILES+=usr/share/man/man9/untimeout.9.gz
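# A minimal replacement sketch for the removed timeout(9) KPI using
# callout(9); my_timer_fn and sc are placeholder names:
#   struct callout c;
#   callout_init(&c, 1);                     /* 1 = MPSAFE */
#   callout_reset(&c, hz, my_timer_fn, sc);  /* was timeout(my_timer_fn, sc, hz) */
#   callout_stop(&c);                        /* was untimeout(my_timer_fn, sc, h) */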
# 20191128: Removal of trm(4)
OLD_FILES+=usr/share/man/man4/trm.4.gz
# 20191121: Removal of sio(4)
OLD_FILES+=usr/share/man/man4/sio.4.gz
# 20191105: picobsd(8) et al. removed
OLD_FILES+=usr/share/man/man8/picobsd.8.gz
# 20191017: taskqueue_start_threads_pinned became taskqueue_start_threads_cpuset
OLD_FILES+=usr/share/man/man9/taskqueue_start_threads_pinned.9.gz
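# A hedged sketch of the renamed KPI; this assumes the cpuset variant
# takes a mask where the pinned variant took a single CPU id, and the
# queue handle tq and name "mytq" are placeholders:
#   cpuset_t mask;
#   CPU_ZERO(&mask);
#   CPU_SET(2, &mask);  /* pin to CPU 2, as the old _pinned call did */
#   taskqueue_start_threads_cpuset(&tq, 1, PI_NET, &mask, "mytq");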
# 20191009: new clang import which bumps version from 8.0.1 to 9.0.0
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/8.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/8.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/8.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/8.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/8.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/8.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/8.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/8.0.1/include
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-aarch64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/8.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/8.0.1/lib
OLD_DIRS+=usr/lib/clang/8.0.1
# 20191009: libc++ 9.0.0 removed some experimental files
OLD_FILES+=usr/include/c++/v1/experimental/any
OLD_FILES+=usr/include/c++/v1/experimental/chrono
OLD_FILES+=usr/include/c++/v1/experimental/numeric
OLD_FILES+=usr/include/c++/v1/experimental/optional
OLD_FILES+=usr/include/c++/v1/experimental/ratio
OLD_FILES+=usr/include/c++/v1/experimental/string_view
OLD_FILES+=usr/include/c++/v1/experimental/system_error
OLD_FILES+=usr/include/c++/v1/experimental/tuple
OLD_FILES+=usr/lib/libc++fs.a
# 20191003: Remove useless ZFS tests
OLD_FILES+=usr/tests/sys/cddl/zfs/tests/cli_root/zpool_create/zpool_create_013_neg.ksh
OLD_FILES+=usr/tests/sys/cddl/zfs/tests/cli_root/zpool_create/zpool_create_014_neg.ksh
OLD_FILES+=usr/tests/sys/cddl/zfs/tests/cli_root/zpool_create/zpool_create_016_pos.ksh
# 20190910: mklocale(1) and colldef(1) removed
OLD_FILES+=usr/bin/mklocale
OLD_FILES+=usr/share/man/man1/mklocale.1.gz
OLD_FILES+=usr/bin/colldef
OLD_FILES+=usr/share/man/man1/colldef.1.gz
# 20190904: Remove boot1.efifat and gptboot.efifat (which should never have existed)
OLD_FILES+=boot/boot1.efifat
OLD_FILES+=boot/gptboot.efifat
# 20190903: pc-sysinstall(8) removed
OLD_FILES+=usr/share/examples/pc-sysinstall/README
OLD_FILES+=usr/share/examples/pc-sysinstall/pc-autoinstall.conf
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.fbsd-netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.geli
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.gmirror
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.restore
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.rsync
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.upgrade
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.zfs
OLD_FILES+=usr/share/man/man8/pc-sysinstall.8.gz
OLD_FILES+=usr/share/pc-sysinstall/backend-partmanager/create-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-partmanager/delete-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-emulation.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-laptop.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-nics.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-info.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-list.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/enable-net.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/get-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-components.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-config.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-mirrors.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-rsync-backups.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-tzones.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/query-langs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/send-logs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/setup-ssh-keys.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/set-mirror.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/sys-mem.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/test-live.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/test-netup.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/update-part-list.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-layouts.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-models.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-variants.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-bsdlabel.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-cleanup.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-disk.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-extractimage.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-ftp.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-installcomponents.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-installpackages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-localize.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-mountdisk.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-mountoptical.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-networking.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-newfs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-parse.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-runcommands.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-unmount.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-upgrade.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-users.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/installimage.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/parseconfig.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/startautoinstall.sh
OLD_FILES+=usr/share/pc-sysinstall/conf/avail-langs
OLD_FILES+=usr/share/pc-sysinstall/conf/exclude-from-upgrade
OLD_FILES+=usr/share/pc-sysinstall/conf/license/bsd-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/license/intel-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/license/nvidia-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/pc-sysinstall.conf
OLD_FILES+=usr/share/pc-sysinstall/doc/help-disk-list
OLD_FILES+=usr/share/pc-sysinstall/doc/help-disk-size
OLD_FILES+=usr/share/pc-sysinstall/doc/help-index
OLD_FILES+=usr/share/pc-sysinstall/doc/help-start-autoinstall
OLD_FILES+=usr/sbin/pc-sysinstall
OLD_DIRS+=usr/share/examples/pc-sysinstall
OLD_DIRS+=usr/share/pc-sysinstall/backend
OLD_DIRS+=usr/share/pc-sysinstall/backend-partmanager
OLD_DIRS+=usr/share/pc-sysinstall/backend-query
OLD_DIRS+=usr/share/pc-sysinstall/conf/license
OLD_DIRS+=usr/share/pc-sysinstall/conf
OLD_DIRS+=usr/share/pc-sysinstall/doc
OLD_DIRS+=usr/share/pc-sysinstall
# 20190825: zlib 1.0.4 removed from kernel
OLD_FILES+=usr/include/sys/zlib.h
OLD_FILES+=usr/include/sys/zutil.h
# 20190817: pft_ping.py and sniffer.py moved to /usr/tests/sys/netpfil/common
OLD_FILES+=usr/tests/sys/netpfil/pf/sniffer.py
OLD_FILES+=usr/tests/sys/netpfil/pf/pft_ping.py
# 20190816: dir.h removed from POSIX
OLD_FILES+=usr/include/sys/dir.h
# 20190813: deprecated GEOM classes removed
OLD_FILES+=usr/share/man/man4/geom_fox.4.gz
# 20190729: gzip'ed a.out support removed
OLD_FILES+=usr/include/sys/inflate.h
# 20190722: cap_random(3) removed
OLD_LIBS+=lib/casper/libcap_random.so.1
OLD_FILES+=usr/include/casper/cap_random.h
OLD_LIBS+=usr/lib/libcap_random.so
OLD_FILES+=usr/share/man/man3/libcap_random.3.gz
OLD_FILES+=usr/share/man/man3/cap_random.3.gz
OLD_FILES+=usr/share/man/man3/cap_random_buf.3.gz
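# cap_random(3) needs no replacement service: arc4random(3) is usable in
# capability mode, so a direct libc call covers the old use (sketch):
#   uint8_t buf[32];
#   arc4random_buf(buf, sizeof(buf));  /* was cap_random_buf(chan, buf, len) */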
# 20190708: vm_page_hold() and _unhold() removed
OLD_FILES+=usr/share/man/man9/vm_page_hold.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_unhold.9.gz
# 20190625: Remove NAND and NANDFS support
OLD_FILES+=usr/share/man/man4/nand.4.gz
OLD_FILES+=usr/share/man/man4/nandsim.4.gz
# 20190618: sys/capability.h removed (use sys/capsicum.h instead; sketch below)
OLD_FILES+=usr/include/sys/capability.h
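# A minimal sketch of the surviving header in use; fd is a placeholder
# descriptor:
#   #include <sys/capsicum.h>  /* was <sys/capability.h> */
#   cap_rights_t rights;
#   cap_rights_init(&rights, CAP_READ);
#   cap_rights_limit(fd, &rights);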
# 20190615: sys/pwm.h renamed to dev/pwmc.h
OLD_FILES+=usr/include/sys/pwm.h
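# For consumers this should be just an include change (sketch):
#   #include <dev/pwmc.h>  /* was <sys/pwm.h> */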
# 20190612: new clang import which bumps version from 8.0.0 to 8.0.1
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/8.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/8.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/8.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/8.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/8.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/8.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/8.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/8.0.0/include
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/8.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/8.0.0/lib
OLD_DIRS+=usr/lib/clang/8.0.0
# 20190523: Remove obsolete kgzip and support files
OLD_FILES+=usr/sbin/kgzip
OLD_FILES+=usr/lib/kgzldr.o
OLD_FILES+=usr/share/man/man8/kgzip.8.gz
# 20190517: Remove obsolete 10 and 10/100 ethernet drivers
OLD_FILES+=usr/share/man/man4/bm.4.gz
OLD_FILES+=usr/share/man/man4/cs.4.gz
OLD_FILES+=usr/share/man/man4/de.4.gz
OLD_FILES+=usr/share/man/man4/if_de.4.gz
OLD_FILES+=usr/share/man/man4/ed.4.gz
OLD_FILES+=usr/share/man/man4/if_ed.4.gz
OLD_FILES+=usr/share/man/man4/ep.4.gz
OLD_FILES+=usr/share/man/man4/ex.4.gz
OLD_FILES+=usr/share/man/man4/fe.4.gz
OLD_FILES+=usr/share/man/man4/pcn.4.gz
OLD_FILES+=usr/share/man/man4/if_pcn.4.gz
OLD_FILES+=usr/share/man/man4/sf.4.gz
OLD_FILES+=usr/share/man/man4/if_sf.4.gz
OLD_FILES+=usr/share/man/man4/sn.4.gz
OLD_FILES+=usr/share/man/man4/if_sn.4.gz
OLD_FILES+=usr/share/man/man4/tl.4.gz
OLD_FILES+=usr/share/man/man4/if_tl.4.gz
OLD_FILES+=usr/share/man/man4/tx.4.gz
OLD_FILES+=usr/share/man/man4/if_tx.4.gz
OLD_FILES+=usr/share/man/man4/txp.4.gz
OLD_FILES+=usr/share/man/man4/if_txp.4.gz
OLD_FILES+=usr/share/man/man4/vx.4.gz
OLD_FILES+=usr/share/man/man4/wb.4.gz
OLD_FILES+=usr/share/man/man4/if_wb.4.gz
OLD_FILES+=usr/share/man/man4/xe.4.gz
OLD_FILES+=usr/share/man/man4/if_xe.4.gz
# 20190513: libcap_sysctl interface change
OLD_LIBS+=lib/casper/libcap_sysctl.so.1
# 20190509: tests/sys/opencrypto requires the net/py-dpkt package
OLD_FILES+=usr/tests/sys/opencrypto/dpkt.py
OLD_FILES+=usr/tests/sys/opencrypto/dpkt.pyc
# 20190304: new libc++ import which bumps version from 7.0.1 to 8.0.0
OLD_FILES+=usr/include/c++/v1/experimental/dynarray
# 20190304: new clang import which bumps version from 7.0.1 to 8.0.0
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/7.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/7.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/7.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/7.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/7.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/7.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/7.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/7.0.1/include
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/7.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/7.0.1/lib
OLD_DIRS+=usr/lib/clang/7.0.1
# 20190227: rename seq.h to seqc.h
OLD_FILES+=usr/include/sys/seq.h
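# A hedged sketch of the renamed header in use, assuming the write-side
# pairing carried over under the seqc_ prefix:
#   #include <sys/seqc.h>  /* was <sys/seq.h> */
#   seqc_t sc = 0;
#   seqc_write_begin(&sc);
#   /* update the protected fields */
#   seqc_write_end(&sc);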
# 20190222: libifconfig made INTERNALLIB
OLD_FILES+=usr/lib/libprivateifconfig.a
OLD_FILES+=usr/lib/libprivateifconfig_p.a
# 20190131: pfil(9) changed
OLD_FILES+=usr/share/man/man9/pfil_hook_get.9.gz
OLD_FILES+=usr/share/man/man9/pfil_rlock.9.gz
OLD_FILES+=usr/share/man/man9/pfil_runlock.9.gz
OLD_FILES+=usr/share/man/man9/pfil_wlock.9.gz
OLD_FILES+=usr/share/man/man9/pfil_wunlock.9.gz
# 20190126: adv(4) / adw(4) removal
OLD_FILES+=usr/share/man/man4/adv.4.gz
OLD_FILES+=usr/share/man/man4/adw.4.gz
# 20190123: nonexistent cred_update_thread(9) removed
OLD_FILES+=usr/share/man/man9/cred_update_thread.9.gz
# 20190114: old pbuf allocator removed
OLD_FILES+=usr/share/man/man9/getpbuf.9.gz
OLD_FILES+=usr/share/man/man9/pbuf.9.gz
OLD_FILES+=usr/share/man/man9/relpbuf.9.gz
OLD_FILES+=usr/share/man/man9/trypbuf.9.gz
# 20181219: ibcs2 removal
OLD_FILES+=usr/share/examples/ibcs2/hello.uu
OLD_FILES+=usr/share/examples/ibcs2/README
OLD_DIRS+=usr/share/examples/ibcs2
# 20181215: Migration of CTM to ports
OLD_FILES+=usr/sbin/ctm
OLD_FILES+=usr/sbin/ctm_dequeue
OLD_FILES+=usr/sbin/ctm_rmail
OLD_FILES+=usr/sbin/ctm_smail
OLD_FILES+=usr/share/man/man1/ctm.1.gz
OLD_FILES+=usr/share/man/man1/ctm_dequeue.1.gz
OLD_FILES+=usr/share/man/man1/ctm_rmail.1.gz
OLD_FILES+=usr/share/man/man1/ctm_smail.1.gz
OLD_FILES+=usr/share/man/man5/ctm.5.gz
# 20181214: Remove timed files
OLD_FILES+=etc/rc.d/timed
OLD_FILES+=usr/sbin/timed
OLD_FILES+=usr/sbin/timedc
OLD_FILES+=usr/share/man/man8/timed.8.gz
OLD_FILES+=usr/share/man/man8/timedc.8.gz
# 20181211: new clang import which bumps version from 6.0.1 to 7.0.1
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/6.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/6.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/6.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/6.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/6.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/6.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/6.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/6.0.1/include
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/6.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/6.0.1/lib
OLD_DIRS+=usr/lib/clang/6.0.1
# 20181116: reuseport_lb test file renamed
OLD_FILES+=usr/tests/sys/netinet/reuseport_lb
# 20181113: libufs version bumped to 7
OLD_LIBS+=lib/libufs.so.6
# 20181112: Clean up old libcap_dns
OLD_LIBS+=lib/casper/libcap_dns.so.1
# 20181030: malloc_domain(9) KPI change
OLD_FILES+=usr/share/man/man9/malloc_domain.9.gz
# 20181026: joy(4) removal
OLD_FILES+=usr/share/man/man4/joy.4.gz
# 20181025: OpenSSL libraries version bump to avoid conflict with ports
OLD_LIBS+=lib/libcrypto.so.9
OLD_LIBS+=usr/lib/libssl.so.9
# 20181022: aha(4) removal
OLD_FILES+=usr/share/man/man4/aha.4.gz
# 20181022: dpt(4) removal
OLD_FILES+=usr/share/man/man4/dpt.4.gz
# 20181022: ncr(4) removal
OLD_FILES+=usr/share/man/man4/ncr.4.gz
# 20181022: ncv(4) removal
OLD_FILES+=usr/share/man/man4/ncv.4.gz
# 20181022: nsp(4) removal
OLD_FILES+=usr/share/man/man4/nsp.4.gz
# 20181022: stg(4) removal
OLD_FILES+=usr/share/man/man4/stg.4.gz
# 20181021: mse(4) removal
OLD_FILES+=usr/share/man/man4/mse.4.gz
# 20181015: Stale libcasper(3) files following r329452
OLD_LIBS+=lib/casper/libcap_sysctl.so.0
OLD_LIBS+=lib/casper/libcap_grp.so.0
OLD_LIBS+=lib/casper/libcap_pwd.so.0
OLD_LIBS+=lib/casper/libcap_random.so.0
OLD_LIBS+=lib/casper/libcap_dns.so.0
OLD_LIBS+=lib/casper/libcap_syslog.so.0
# 20181012: rename of ixlv(4) to iavf(4)
OLD_FILES+=usr/share/man/man4/if_ixlv.4.gz
OLD_FILES+=usr/share/man/man4/ixlv.4.gz
# 20181009: OpenSSL 1.1.1
OLD_FILES+=usr/include/openssl/des_old.h
OLD_FILES+=usr/include/openssl/dso.h
OLD_FILES+=usr/include/openssl/krb5_asn.h
OLD_FILES+=usr/include/openssl/kssl.h
OLD_FILES+=usr/include/openssl/pqueue.h
OLD_FILES+=usr/include/openssl/ssl23.h
OLD_FILES+=usr/include/openssl/ui_compat.h
OLD_FILES+=usr/share/openssl/man/man1/dss1.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md2.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md4.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md5.1.gz
OLD_FILES+=usr/share/openssl/man/man1/mdc2.1.gz
OLD_FILES+=usr/share/openssl/man/man1/ripemd160.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha1.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha224.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha256.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha384.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha512.1.gz
OLD_FILES+=usr/share/openssl/man/man1/x509v3_config.1.gz
OLD_FILES+=usr/share/openssl/man/man3/ASN1_STRING_length_set.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_int_port.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_ip.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_int_port.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_ip.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_get_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_set_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_MONT_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_RECP_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_memdup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strdup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcat.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcpy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strndup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_cert.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cmp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cpy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_current.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_get_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_hash.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_set_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_destroy_dynlockid.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_get_new_dynlockid.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_lock.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_num_locks.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_create_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_destroy_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_lock_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_locking_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_ede3_cbcm_encrypt.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_enc_read.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_enc_write.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_get_key_method_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_insert_key_method_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_POINT_set_Jprojective_coordinates.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ERR_load_UI_strings.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MAX_MD_SIZE.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_create.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_destroy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEVP_PKEY_CTX_set_app_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_CTX_set_rsa_rsa_keygen_bits.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_get_default_digest.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_dss.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_dss1.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_sha.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/OPENSSL_ia32cap_loc.3.gz
OLD_FILES+=usr/share/openssl/man/man3/PEM.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RAND_SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RSA_PKCS1_SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RSA_null_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_need_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_custom_cli_ext.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_default_read_ahead.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_add_session.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_flush_sessions.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_accept_state.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_msg_callback_arg.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_need_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_remove_session.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay_add_ssl_algorithms.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay_version.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_client_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_server_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_set_chain.3.gz
OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_trusted_stack.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/blowfish.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_add_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_check_top.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_cmp_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_div_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_dump.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_expand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_expand2.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_fix_top.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_internal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_add_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba8.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_high.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_part_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_print.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_high.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_low.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_max.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba8.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sub_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_wexpand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/buffer.3.gz
OLD_FILES+=usr/share/openssl/man/man3/crypto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPrivate_key.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_Netscape_RSA.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_PKCS8PrivateKey.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_Private_key.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_2passwords.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_pw.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/openssl/man/man3/dh.3.gz
OLD_FILES+=usr/share/openssl/man/man3/dsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ec.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ecdsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/engine.3.gz
OLD_FILES+=usr/share/openssl/man/man3/err.3.gz
OLD_FILES+=usr/share/openssl/man/man3/evp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/hmac.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_Netscape_RSA.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_delete.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_doall.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_doall_arg.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_error.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_free.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_insert.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_new.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_retrieve.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lhash.3.gz
OLD_FILES+=usr/share/openssl/man/man3/md5.3.gz
OLD_FILES+=usr/share/openssl/man/man3/mdc2.3.gz
OLD_FILES+=usr/share/openssl/man/man3/pem.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rc4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ripemd.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/sha.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ssl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/threads.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ui.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ui_compat.3.gz
OLD_FILES+=usr/share/openssl/man/man3/x509.3.gz
OLD_LIBS+=lib/libcrypto.so.8
OLD_LIBS+=usr/lib/engines/lib4758cca.so
OLD_LIBS+=usr/lib/engines/libaep.so
OLD_LIBS+=usr/lib/engines/libatalla.so
OLD_LIBS+=usr/lib/engines/libcapi.so
OLD_LIBS+=usr/lib/engines/libchil.so
OLD_LIBS+=usr/lib/engines/libcswift.so
OLD_LIBS+=usr/lib/engines/libgost.so
OLD_LIBS+=usr/lib/engines/libnuron.so
OLD_LIBS+=usr/lib/engines/libsureware.so
OLD_LIBS+=usr/lib/engines/libubsec.so
OLD_LIBS+=usr/lib/libssl.so.8
OLD_LIBS+=usr/lib32/lib4758cca.so
OLD_LIBS+=usr/lib32/libaep.so
OLD_LIBS+=usr/lib32/libatalla.so
OLD_LIBS+=usr/lib32/libcapi.so
OLD_LIBS+=usr/lib32/libchil.so
OLD_LIBS+=usr/lib32/libcswift.so
OLD_LIBS+=usr/lib32/libgost.so
OLD_LIBS+=usr/lib32/libnuron.so
OLD_LIBS+=usr/lib32/libsureware.so
OLD_LIBS+=usr/lib32/libubsec.so
# 20180824: libbe(3) SHLIBDIR fixed to reflect correct location
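# (A MOVED_LIBS entry marks a stale copy of a library whose install
# directory changed, rather than a shared-library version bump.)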
MOVED_LIBS+=usr/lib/libbe.so.1
# 20180819: Remove deprecated arc4random(3) stir/addrandom interfaces
OLD_FILES+=usr/share/man/man3/arc4random_addrandom.3.gz
OLD_FILES+=usr/share/man/man3/arc4random_stir.3.gz
# 20180819: send-pr(1) placeholder removal
OLD_FILES+=usr/bin/send-pr
# 20180801: jedec_ts(4) removed
OLD_FILES+=usr/share/man/man4/jedec_ts.4.gz
# 20180725: Clean up old libcasper.so.0
OLD_LIBS+=lib/libcasper.so.0
# 20180722: indent(1) option renamed; test files renamed to match
OLD_FILES+=usr/bin/indent/tests/nsac.0
OLD_FILES+=usr/bin/indent/tests/nsac.0.pro
OLD_FILES+=usr/bin/indent/tests/nsac.0.stdout
OLD_FILES+=usr/bin/indent/tests/sac.0
OLD_FILES+=usr/bin/indent/tests/sac.0.pro
OLD_FILES+=usr/bin/indent/tests/sac.0.stdout
# 20180721: move of libmlx5.so.1 and libibverbs.so.1
MOVED_LIBS+=usr/lib/libmlx5.so.1
MOVED_LIBS+=usr/lib/libibverbs.so.1
# 20180720: zfsloader.8 merged into loader.8
OLD_FILES+=usr/share/man/man8/zfsloader.8.gz
# 20180710: old NUMA interface cleanup
OLD_FILES+=usr/include/sys/numa.h
OLD_FILES+=usr/share/man/man2/numa_getaffinity.2.gz
OLD_FILES+=usr/share/man/man2/numa_setaffinity.2.gz
OLD_FILES+=usr/share/man/man1/numactl.1.gz
OLD_FILES+=usr/bin/numactl
# 20180630: new clang import which bumps version from 6.0.0 to 6.0.1
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/6.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/6.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/6.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/6.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/6.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/6.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/6.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/6.0.0/include
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/6.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/6.0.0/lib
OLD_DIRS+=usr/lib/clang/6.0.0
# 20180615: asf(8) removed
OLD_FILES+=usr/sbin/asf
OLD_FILES+=usr/share/man/man8/asf.8.gz
# 20180609: obsolete libc++ files missed from the 5.0.0 import
OLD_FILES+=usr/include/c++/v1/__refstring
OLD_FILES+=usr/include/c++/v1/__undef_min_max
OLD_FILES+=usr/include/c++/v1/tr1/__refstring
OLD_FILES+=usr/include/c++/v1/tr1/__undef_min_max
# 20180607: remove NLS support from grep
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/grep.cat
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/grep.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/grep.cat
# 20180528: libpcap update removed header file
OLD_FILES+=usr/include/pcap/export-defs.h
# 20180517: retire vxge
OLD_FILES+=usr/share/man/man4/if_vxge.4.gz
OLD_FILES+=usr/share/man/man4/vxge.4.gz
# 20180512: Rename Unbound tools
OLD_FILES+=usr/sbin/unbound
OLD_FILES+=usr/sbin/unbound-anchor
OLD_FILES+=usr/sbin/unbound-checkconf
OLD_FILES+=usr/sbin/unbound-control
OLD_FILES+=usr/share/man/man5/unbound.conf.5.gz
OLD_FILES+=usr/share/man/man8/unbound-anchor.8.gz
OLD_FILES+=usr/share/man/man8/unbound-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/unbound-control.8.gz
OLD_FILES+=usr/share/man/man8/unbound.8.gz
# 20180508: retire nxge
OLD_FILES+=usr/share/man/man4/if_nxge.4.gz
OLD_FILES+=usr/share/man/man4/nxge.4.gz
# 20180505: remove dot.rhosts skeleton file
OLD_FILES+=usr/share/skel/dot.rhosts
# 20180502: retire ixgb
OLD_FILES+=usr/share/man/man4/if_ixgb.4.gz
OLD_FILES+=usr/share/man/man4/ixgb.4.gz
# 20180501: retire lmc
OLD_FILES+=usr/include/dev/lmc/if_lmc.h
OLD_DIRS+=usr/include/dev/lmc
OLD_FILES+=usr/sbin/lmcconfig
OLD_FILES+=usr/share/man/man4/lmc.4.gz
OLD_FILES+=usr/share/man/man4/if_lmc.4.gz
OLD_FILES+=usr/share/man/man8/lmcconfig.8.gz
# 20180417: remove fuswintr and suswintr
OLD_FILES+=usr/share/man/man9/fuswintr.9.gz
OLD_FILES+=usr/share/man/man9/suswintr.9.gz
# 20180413: remove ARCnet support
OLD_FILES+=usr/include/net/if_arc.h
OLD_FILES+=usr/share/man/man4/cm.4.gz
# 20180409: remove FDDI support
OLD_FILES+=usr/include/net/fddi.h
OLD_FILES+=usr/share/man/man4/fpa.4.gz
# 20180319: remove /boot/overlays, replaced by /boot/dtb/overlays
OLD_DIRS+=boot/overlays
# 20180311: remove sys/i386/include/pcaudioio.h
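# (pcaudioio.h was installed as a machine header on i386 only, hence the
# TARGET_ARCH guard below.)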
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/pcaudioio.h
.endif
# 20180310: remove sys/sys/dataacq.h
OLD_FILES+=usr/include/sys/dataacq.h
# 20180306: remove DTrace scripts made obsolete by dwatch(1)
OLD_FILES+=usr/share/dtrace/watch_execve
OLD_FILES+=usr/share/dtrace/watch_kill
OLD_FILES+=usr/share/dtrace/watch_vop_remove
# 20180212: move devmatch(8) from /usr/sbin to /sbin
OLD_FILES+=usr/sbin/devmatch
# 20180211: remove usb.conf
OLD_FILES+=etc/devd/usb.conf
# 20180208: remove c_rehash(1)
OLD_FILES+=usr/share/openssl/man/man1/c_rehash.1.gz
# 20180206: remove gdbtui
OLD_FILES+=usr/bin/gdbtui
# 20180201: Obsolete Forth files
OLD_FILES+=boot/pcibios.4th
# 20180114: new clang import which bumps version from 5.0.1 to 6.0.0
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/5.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/5.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/5.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/5.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/5.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/5.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/5.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/5.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/5.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/5.0.1/include
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/5.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/5.0.1/lib
OLD_DIRS+=usr/lib/clang/5.0.1
# 20180109: Remove vestiges of digi(4) driver
OLD_FILES+=usr/include/sys/digiio.h
OLD_FILES+=usr/sbin/digictl
OLD_FILES+=usr/share/man/man8/digictl.8.gz
# 20180107: Convert remaining geli(8) tests to ATF
OLD_FILES+=tests/sys/geom/class/eli/nokey_test.sh
OLD_FILES+=tests/sys/geom/class/eli/readonly_test.sh
# 20180106: Convert most geli(8) tests to ATF
OLD_FILES+=tests/sys/geom/class/eli/attach_d_test.sh
OLD_FILES+=tests/sys/geom/class/eli/configure_b_B_test.sh
OLD_FILES+=tests/sys/geom/class/eli/detach_l_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_B_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_J_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_a_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_alias_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_i_P_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_copy_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_data_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_hmac_test.sh
OLD_FILES+=tests/sys/geom/class/eli/onetime_a_test.sh
OLD_FILES+=tests/sys/geom/class/eli/onetime_d_test.sh
# 20171230: Remove /etc/skel from mtree
OLD_DIRS+=etc/skel
# 20171208: Remove basename_r(3)
OLD_FILES+=usr/share/man/man3/basename_r.3.gz
# 20171206: Remove sponge(1)
OLD_FILES+=usr/bin/sponge
OLD_FILES+=usr/share/man/man1/sponge.1.gz
# 20171204: Move fdformat man page from volume 1 to volume 8
OLD_FILES+=usr/share/man/man1/fdformat.1.gz
# 20171203: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.4
# 20171203: new clang import which bumps version from 5.0.0 to 5.0.1
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/5.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/5.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/5.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/5.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/5.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/5.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/5.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/5.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/5.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/5.0.0/include
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/5.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/5.0.0/lib
OLD_DIRS+=usr/lib/clang/5.0.0
# 20171118: Remove old /etc/casper files
OLD_FILES+=etc/casper/system.dns
OLD_FILES+=etc/casper/system.grp
OLD_FILES+=etc/casper/system.pwd
OLD_FILES+=etc/casper/system.random
OLD_FILES+=etc/casper/system.sysctl
OLD_DIRS+=etc/casper
# 20171116: lint(1) removal
OLD_FILES+=usr/bin/lint
OLD_FILES+=usr/libexec/lint1
OLD_FILES+=usr/libexec/lint2
OLD_FILES+=usr/libdata/lint/llib-lposix.ln
OLD_FILES+=usr/libdata/lint/llib-lstdc.ln
OLD_FILES+=usr/share/man/man1/lint.1.gz
OLD_FILES+=usr/share/man/man7/lint.7.gz
OLD_DIRS+=usr/libdata/lint
# 20171114: Removal of all fortune datfiles other than freebsd-tips
OLD_FILES+=usr/share/games/fortune/fortunes
OLD_FILES+=usr/share/games/fortune/fortunes.dat
OLD_FILES+=usr/share/games/fortune/gerrold.limerick
OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat
OLD_FILES+=usr/share/games/fortune/limerick
OLD_FILES+=usr/share/games/fortune/limerick.dat
OLD_FILES+=usr/share/games/fortune/murphy
OLD_FILES+=usr/share/games/fortune/murphy-o
OLD_FILES+=usr/share/games/fortune/murphy-o.dat
OLD_FILES+=usr/share/games/fortune/murphy.dat
OLD_FILES+=usr/share/games/fortune/startrek
OLD_FILES+=usr/share/games/fortune/startrek.dat
OLD_FILES+=usr/share/games/fortune/zippy
OLD_FILES+=usr/share/games/fortune/zippy.dat
# 20171112: Removal of eqnchar definition
OLD_FILES+=usr/share/misc/eqnchar
# 20171110: Removal of mailaddr man page
OLD_FILES+=usr/share/man/man7/mailaddr.7.gz
# 20171108: Rename of NgSendMsgReply to NgSendReplyMsg
OLD_FILES+=usr/share/man/man3/NgSendMsgReply.3.gz
# 20171108: badsect(8) removal
OLD_FILES+=sbin/badsect
OLD_FILES+=rescue/badsect
OLD_FILES+=usr/share/man/man8/badsect.8.gz
# 20171105: fixing lib/libclang_rt CRTARCH for arm:armv[67]
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
    (!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
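# The :M modifier keeps only words matching the given pattern, so this block
# applies to armv6/armv7 worlds that are not built with a soft-float CPUTYPE.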
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_LIBS+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
.endif
# 20171104: libcap_random should be in /lib, not in /usr/lib
OLD_LIBS+=usr/lib/libcap_random.so.0
# 20171104: Casper can only work as a shared library
OLD_FILES+=usr/lib/libcap_dns.a
OLD_FILES+=usr/lib/libcap_dns_p.a
OLD_FILES+=usr/lib/libcap_grp.a
OLD_FILES+=usr/lib/libcap_grp_p.a
OLD_FILES+=usr/lib/libcap_pwd.a
OLD_FILES+=usr/lib/libcap_pwd_p.a
OLD_FILES+=usr/lib/libcap_random.a
OLD_FILES+=usr/lib/libcap_random_p.a
OLD_FILES+=usr/lib/libcap_sysctl.a
OLD_FILES+=usr/lib/libcap_sysctl_p.a
OLD_FILES+=usr/lib/libcasper.a
OLD_FILES+=usr/lib/libcasper_p.a
# 20171031: Removal of adding_user man page
OLD_FILES+=usr/share/man/man7/adding_user.7.gz
# 20171031: Disconnected libpathconv tests
OLD_DIRS+=usr/tests/lib/libpathconv
# 20171017: Removal of mbpool(9)
OLD_FILES+=usr/include/sys/mbpool.h
OLD_FILES+=usr/share/man/man9/mbpool.9.gz
OLD_FILES+=usr/share/man/man9/mbp_destroy.9.gz
OLD_FILES+=usr/share/man/man9/mbp_alloc.9.gz
OLD_FILES+=usr/share/man/man9/mbp_ext_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_count.9.gz
OLD_FILES+=usr/share/man/man9/mbp_card_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_get_keep.9.gz
OLD_FILES+=usr/share/man/man9/mbp_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_get.9.gz
OLD_FILES+=usr/share/man/man9/mbp_create.9.gz
OLD_FILES+=usr/share/man/man9/mbp_sync.9.gz
# 20171010: Remove libstand
OLD_FILES+=usr/lib/libstand.a
OLD_FILES+=usr/lib/libstand_p.a
OLD_FILES+=usr/include/stand.h
OLD_FILES+=usr/share/man/man3/libstand.3.gz
# 20171003: remove RCMDS (rcp, rlogin, rsh, and their daemons)
OLD_FILES+=bin/rcp
OLD_FILES+=rescue/rcp
OLD_FILES+=usr/bin/rlogin
OLD_FILES+=usr/bin/rsh
OLD_FILES+=usr/libexec/rlogind
OLD_FILES+=usr/libexec/rshd
OLD_FILES+=usr/share/man/man1/rcp.1.gz
OLD_FILES+=usr/share/man/man1/rlogin.1.gz
OLD_FILES+=usr/share/man/man1/rsh.1.gz
OLD_FILES+=usr/share/man/man8/rlogind.8.gz
OLD_FILES+=usr/share/man/man8/rshd.8.gz
# 20170927: crshared(9) removed
OLD_FILES+=usr/share/man/man9/crshared.9.gz
# 20170927: procctl(8) removed
OLD_FILES+=usr/share/man/man8/procctl.8.gz
OLD_FILES+=usr/sbin/procctl
# 20170926: remove unneeded man aliases and locale directories
OLD_FILES+=usr/share/man/en.ISO8859-1/man1
OLD_FILES+=usr/share/man/en.ISO8859-1/man2
OLD_FILES+=usr/share/man/en.ISO8859-1/man3
OLD_FILES+=usr/share/man/en.ISO8859-1/man4
OLD_FILES+=usr/share/man/en.ISO8859-1/man5
OLD_FILES+=usr/share/man/en.ISO8859-1/man6
OLD_FILES+=usr/share/man/en.ISO8859-1/man7
OLD_FILES+=usr/share/man/en.ISO8859-1/man8
OLD_FILES+=usr/share/man/en.ISO8859-1/man9
OLD_FILES+=usr/share/man/en.ISO8859-1/mandoc.db
OLD_DIRS+=usr/share/man/en.ISO8859-1
OLD_FILES+=usr/share/man/en.UTF-8/man1
OLD_FILES+=usr/share/man/en.UTF-8/man2
OLD_FILES+=usr/share/man/en.UTF-8/man3
OLD_FILES+=usr/share/man/en.UTF-8/man4
OLD_FILES+=usr/share/man/en.UTF-8/man5
OLD_FILES+=usr/share/man/en.UTF-8/man6
OLD_FILES+=usr/share/man/en.UTF-8/man7
OLD_FILES+=usr/share/man/en.UTF-8/man8
OLD_FILES+=usr/share/man/en.UTF-8/man9
OLD_FILES+=usr/share/man/en.UTF-8/mandoc.db
OLD_DIRS+=usr/share/man/en.UTF-8
OLD_FILES+=usr/share/man/en.ISO8859-15
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man1
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man3
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/mandoc.db
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1
OLD_FILES+=usr/share/openssl/man/en.ISO8859-15
OLD_DIRS+=usr/share/man/ja/man1
OLD_DIRS+=usr/share/man/ja/man2
OLD_DIRS+=usr/share/man/ja/man3
OLD_DIRS+=usr/share/man/ja/man4
OLD_DIRS+=usr/share/man/ja/man5
OLD_DIRS+=usr/share/man/ja/man6
OLD_DIRS+=usr/share/man/ja/man7
OLD_DIRS+=usr/share/man/ja/man8
OLD_DIRS+=usr/share/man/ja/man9
OLD_DIRS+=usr/share/man/ja
# 20170913: remove unneeded catman utility
OLD_FILES+=etc/periodic/weekly/330.catman
OLD_FILES+=usr/bin/catman
OLD_FILES+=usr/libexec/catman.local
OLD_FILES+=usr/share/man/man1/catman.1.gz
OLD_FILES+=usr/share/man/man8/catman.local.8.gz
OLD_DIRS+=usr/share/man/cat1
OLD_DIRS+=usr/share/man/cat2
OLD_DIRS+=usr/share/man/cat3
OLD_DIRS+=usr/share/man/cat4/amd64
OLD_DIRS+=usr/share/man/cat4/arm
OLD_DIRS+=usr/share/man/cat4/i386
OLD_DIRS+=usr/share/man/cat4/powerpc
OLD_DIRS+=usr/share/man/cat4/sparc64
OLD_DIRS+=usr/share/man/cat4
OLD_DIRS+=usr/share/man/cat5
OLD_DIRS+=usr/share/man/cat6
OLD_DIRS+=usr/share/man/cat7
OLD_DIRS+=usr/share/man/cat8/amd64
OLD_DIRS+=usr/share/man/cat8/arm
OLD_DIRS+=usr/share/man/cat8/i386
OLD_DIRS+=usr/share/man/cat8/powerpc
OLD_DIRS+=usr/share/man/cat8/sparc64
OLD_DIRS+=usr/share/man/cat8
OLD_DIRS+=usr/share/man/cat9
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat2
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat3
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/amd64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/arm
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/i386
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/powerpc
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/sparc64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat5
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat6
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat7
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/amd64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/arm
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/i386
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/powerpc
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/sparc64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat9
OLD_DIRS+=usr/share/man/en.UTF-8/cat1
OLD_DIRS+=usr/share/man/en.UTF-8/cat2
OLD_DIRS+=usr/share/man/en.UTF-8/cat3
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/amd64
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/arm
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/i386
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/powerpc
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/sparc64
OLD_DIRS+=usr/share/man/en.UTF-8/cat4
OLD_DIRS+=usr/share/man/en.UTF-8/cat5
OLD_DIRS+=usr/share/man/en.UTF-8/cat6
OLD_DIRS+=usr/share/man/en.UTF-8/cat7
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/amd64
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/arm
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/i386
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/powerpc
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/sparc64
OLD_DIRS+=usr/share/man/en.UTF-8/cat8
OLD_DIRS+=usr/share/man/en.UTF-8/cat9
OLD_DIRS+=usr/share/man/ja/cat1
OLD_DIRS+=usr/share/man/ja/cat2
OLD_DIRS+=usr/share/man/ja/cat3
OLD_DIRS+=usr/share/man/ja/cat4/amd64
OLD_DIRS+=usr/share/man/ja/cat4/arm
OLD_DIRS+=usr/share/man/ja/cat4/i386
OLD_DIRS+=usr/share/man/ja/cat4/powerpc
OLD_DIRS+=usr/share/man/ja/cat4/sparc64
OLD_DIRS+=usr/share/man/ja/cat4
OLD_DIRS+=usr/share/man/ja/cat5
OLD_DIRS+=usr/share/man/ja/cat6
OLD_DIRS+=usr/share/man/ja/cat7
OLD_DIRS+=usr/share/man/ja/cat8/amd64
OLD_DIRS+=usr/share/man/ja/cat8/arm
OLD_DIRS+=usr/share/man/ja/cat8/i386
OLD_DIRS+=usr/share/man/ja/cat8/powerpc
OLD_DIRS+=usr/share/man/ja/cat8/sparc64
OLD_DIRS+=usr/share/man/ja/cat8
OLD_DIRS+=usr/share/man/ja/cat9
OLD_DIRS+=usr/share/openssl/man/cat1
OLD_DIRS+=usr/share/openssl/man/cat3
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat1
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat3
# 20170830: rename ntb_hw(4) to ntb_hw_intel(4)
OLD_FILES+=usr/share/man/man4/ntb_hw.4.gz
# 20170802: ksyms(4) ioctl interface was removed
OLD_FILES+=usr/include/sys/ksyms.h
# 20170729: the iicbus/pcf8563 driver was replaced by iicbus/nxprtc
OLD_FILES+=usr/include/dev/iicbus/pcf8563reg.h
# 20170727: options FLOWTABLE removed
OLD_FILES+=usr/include/net/flowtable.h
# 20170722: new clang import which bumps version from 4.0.0 to 5.0.0
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/4.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/4.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/4.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/4.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/4.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/4.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/4.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/4.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/4.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/4.0.0/include
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/4.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/4.0.0/lib
OLD_DIRS+=usr/lib/clang/4.0.0
OLD_FILES+=usr/bin/llvm-pdbdump
# 20170717: Remove documentation of the never-implemented pdwait4(2)
OLD_FILES+=usr/share/man/man2/pdwait4.2.gz
# 20170610: chown-f_test replaced by chown_test
OLD_FILES+=usr/tests/usr.sbin/chown/chown-f_test
# 20170609: drop obsolete manpage link (if_rtwn.4 -> rtwn.4)
OLD_FILES+=usr/share/man/man4/if_rtwn.4.gz
# 20170531: removal of groff
OLD_FILES+=usr/bin/addftinfo
OLD_FILES+=usr/bin/afmtodit
OLD_FILES+=usr/bin/checknr
OLD_FILES+=usr/bin/colcrt
OLD_FILES+=usr/bin/eqn
OLD_FILES+=usr/bin/grn
OLD_FILES+=usr/bin/grodvi
OLD_FILES+=usr/bin/groff
OLD_FILES+=usr/bin/grog
OLD_FILES+=usr/bin/grolbp
OLD_FILES+=usr/bin/grolj4
OLD_FILES+=usr/bin/grops
OLD_FILES+=usr/bin/grotty
OLD_FILES+=usr/bin/hpftodit
OLD_FILES+=usr/bin/indxbib
OLD_FILES+=usr/bin/lkbib
OLD_FILES+=usr/bin/lookbib
OLD_FILES+=usr/bin/mmroff
OLD_FILES+=usr/bin/neqn
OLD_FILES+=usr/bin/nroff
OLD_FILES+=usr/bin/pfbtops
OLD_FILES+=usr/bin/pic
OLD_FILES+=usr/bin/post-grohtml
OLD_FILES+=usr/bin/pre-grohtml
OLD_FILES+=usr/bin/psroff
OLD_FILES+=usr/bin/refer
OLD_FILES+=usr/bin/tbl
OLD_FILES+=usr/bin/tfmtodit
OLD_FILES+=usr/bin/troff
OLD_FILES+=usr/bin/vgrind
OLD_FILES+=usr/libexec/vfontedpr
OLD_FILES+=usr/share/dict/eign
OLD_FILES+=usr/share/groff_font/devX100-12/CB
OLD_FILES+=usr/share/groff_font/devX100-12/CBI
OLD_FILES+=usr/share/groff_font/devX100-12/CI
OLD_FILES+=usr/share/groff_font/devX100-12/CR
OLD_FILES+=usr/share/groff_font/devX100-12/DESC
OLD_FILES+=usr/share/groff_font/devX100-12/HB
OLD_FILES+=usr/share/groff_font/devX100-12/HBI
OLD_FILES+=usr/share/groff_font/devX100-12/HI
OLD_FILES+=usr/share/groff_font/devX100-12/HR
OLD_FILES+=usr/share/groff_font/devX100-12/NB
OLD_FILES+=usr/share/groff_font/devX100-12/NBI
OLD_FILES+=usr/share/groff_font/devX100-12/NI
OLD_FILES+=usr/share/groff_font/devX100-12/NR
OLD_FILES+=usr/share/groff_font/devX100-12/S
OLD_FILES+=usr/share/groff_font/devX100-12/TB
OLD_FILES+=usr/share/groff_font/devX100-12/TBI
OLD_FILES+=usr/share/groff_font/devX100-12/TI
OLD_FILES+=usr/share/groff_font/devX100-12/TR
OLD_DIRS+=usr/share/groff_font/devX100-12
OLD_FILES+=usr/share/groff_font/devX100/CB
OLD_FILES+=usr/share/groff_font/devX100/CBI
OLD_FILES+=usr/share/groff_font/devX100/CI
OLD_FILES+=usr/share/groff_font/devX100/CR
OLD_FILES+=usr/share/groff_font/devX100/DESC
OLD_FILES+=usr/share/groff_font/devX100/HB
OLD_FILES+=usr/share/groff_font/devX100/HBI
OLD_FILES+=usr/share/groff_font/devX100/HI
OLD_FILES+=usr/share/groff_font/devX100/HR
OLD_FILES+=usr/share/groff_font/devX100/NB
OLD_FILES+=usr/share/groff_font/devX100/NBI
OLD_FILES+=usr/share/groff_font/devX100/NI
OLD_FILES+=usr/share/groff_font/devX100/NR
OLD_FILES+=usr/share/groff_font/devX100/S
OLD_FILES+=usr/share/groff_font/devX100/TB
OLD_FILES+=usr/share/groff_font/devX100/TBI
OLD_FILES+=usr/share/groff_font/devX100/TI
OLD_FILES+=usr/share/groff_font/devX100/TR
OLD_DIRS+=usr/share/groff_font/devX100
OLD_FILES+=usr/share/groff_font/devX75-12/CB
OLD_FILES+=usr/share/groff_font/devX75-12/CBI
OLD_FILES+=usr/share/groff_font/devX75-12/CI
OLD_FILES+=usr/share/groff_font/devX75-12/CR
OLD_FILES+=usr/share/groff_font/devX75-12/DESC
OLD_FILES+=usr/share/groff_font/devX75-12/HB
OLD_FILES+=usr/share/groff_font/devX75-12/HBI
OLD_FILES+=usr/share/groff_font/devX75-12/HI
OLD_FILES+=usr/share/groff_font/devX75-12/HR
OLD_FILES+=usr/share/groff_font/devX75-12/NB
OLD_FILES+=usr/share/groff_font/devX75-12/NBI
OLD_FILES+=usr/share/groff_font/devX75-12/NI
OLD_FILES+=usr/share/groff_font/devX75-12/NR
OLD_FILES+=usr/share/groff_font/devX75-12/S
OLD_FILES+=usr/share/groff_font/devX75-12/TB
OLD_FILES+=usr/share/groff_font/devX75-12/TBI
OLD_FILES+=usr/share/groff_font/devX75-12/TI
OLD_FILES+=usr/share/groff_font/devX75-12/TR
OLD_DIRS+=usr/share/groff_font/devX75-12
OLD_FILES+=usr/share/groff_font/devX75/CB
OLD_FILES+=usr/share/groff_font/devX75/CBI
OLD_FILES+=usr/share/groff_font/devX75/CI
OLD_FILES+=usr/share/groff_font/devX75/CR
OLD_FILES+=usr/share/groff_font/devX75/DESC
OLD_FILES+=usr/share/groff_font/devX75/HB
OLD_FILES+=usr/share/groff_font/devX75/HBI
OLD_FILES+=usr/share/groff_font/devX75/HI
OLD_FILES+=usr/share/groff_font/devX75/HR
OLD_FILES+=usr/share/groff_font/devX75/NB
OLD_FILES+=usr/share/groff_font/devX75/NBI
OLD_FILES+=usr/share/groff_font/devX75/NI
OLD_FILES+=usr/share/groff_font/devX75/NR
OLD_FILES+=usr/share/groff_font/devX75/S
OLD_FILES+=usr/share/groff_font/devX75/TB
OLD_FILES+=usr/share/groff_font/devX75/TBI
OLD_FILES+=usr/share/groff_font/devX75/TI
OLD_FILES+=usr/share/groff_font/devX75/TR
OLD_DIRS+=usr/share/groff_font/devX75
OLD_FILES+=usr/share/groff_font/devascii/B
OLD_FILES+=usr/share/groff_font/devascii/BI
OLD_FILES+=usr/share/groff_font/devascii/CW
OLD_FILES+=usr/share/groff_font/devascii/DESC
OLD_FILES+=usr/share/groff_font/devascii/I
OLD_FILES+=usr/share/groff_font/devascii/L
OLD_FILES+=usr/share/groff_font/devascii/R
OLD_FILES+=usr/share/groff_font/devascii/S
OLD_DIRS+=usr/share/groff_font/devascii
OLD_FILES+=usr/share/groff_font/devcp1047/B
OLD_FILES+=usr/share/groff_font/devcp1047/BI
OLD_FILES+=usr/share/groff_font/devcp1047/CW
OLD_FILES+=usr/share/groff_font/devcp1047/DESC
OLD_FILES+=usr/share/groff_font/devcp1047/I
OLD_FILES+=usr/share/groff_font/devcp1047/L
OLD_FILES+=usr/share/groff_font/devcp1047/R
OLD_FILES+=usr/share/groff_font/devcp1047/S
OLD_DIRS+=usr/share/groff_font/devcp1047
OLD_FILES+=usr/share/groff_font/devdvi/CW
OLD_FILES+=usr/share/groff_font/devdvi/CWEC
OLD_FILES+=usr/share/groff_font/devdvi/CWI
OLD_FILES+=usr/share/groff_font/devdvi/CWIEC
OLD_FILES+=usr/share/groff_font/devdvi/CWITC
OLD_FILES+=usr/share/groff_font/devdvi/CWTC
OLD_FILES+=usr/share/groff_font/devdvi/CompileFonts
OLD_FILES+=usr/share/groff_font/devdvi/DESC
OLD_FILES+=usr/share/groff_font/devdvi/EX
OLD_FILES+=usr/share/groff_font/devdvi/HB
OLD_FILES+=usr/share/groff_font/devdvi/HBEC
OLD_FILES+=usr/share/groff_font/devdvi/HBI
OLD_FILES+=usr/share/groff_font/devdvi/HBIEC
OLD_FILES+=usr/share/groff_font/devdvi/HBITC
OLD_FILES+=usr/share/groff_font/devdvi/HBTC
OLD_FILES+=usr/share/groff_font/devdvi/HI
OLD_FILES+=usr/share/groff_font/devdvi/HIEC
OLD_FILES+=usr/share/groff_font/devdvi/HITC
OLD_FILES+=usr/share/groff_font/devdvi/HR
OLD_FILES+=usr/share/groff_font/devdvi/HREC
OLD_FILES+=usr/share/groff_font/devdvi/HRTC
OLD_FILES+=usr/share/groff_font/devdvi/MI
OLD_FILES+=usr/share/groff_font/devdvi/Makefile
OLD_FILES+=usr/share/groff_font/devdvi/S
OLD_FILES+=usr/share/groff_font/devdvi/SA
OLD_FILES+=usr/share/groff_font/devdvi/SB
OLD_FILES+=usr/share/groff_font/devdvi/SC
OLD_FILES+=usr/share/groff_font/devdvi/TB
OLD_FILES+=usr/share/groff_font/devdvi/TBEC
OLD_FILES+=usr/share/groff_font/devdvi/TBI
OLD_FILES+=usr/share/groff_font/devdvi/TBIEC
OLD_FILES+=usr/share/groff_font/devdvi/TBITC
OLD_FILES+=usr/share/groff_font/devdvi/TBTC
OLD_FILES+=usr/share/groff_font/devdvi/TI
OLD_FILES+=usr/share/groff_font/devdvi/TIEC
OLD_FILES+=usr/share/groff_font/devdvi/TITC
OLD_FILES+=usr/share/groff_font/devdvi/TR
OLD_FILES+=usr/share/groff_font/devdvi/TREC
OLD_FILES+=usr/share/groff_font/devdvi/TRTC
OLD_FILES+=usr/share/groff_font/devdvi/ec.map
OLD_FILES+=usr/share/groff_font/devdvi/msam.map
OLD_FILES+=usr/share/groff_font/devdvi/msbm.map
OLD_FILES+=usr/share/groff_font/devdvi/tc.map
OLD_FILES+=usr/share/groff_font/devdvi/texb.map
OLD_FILES+=usr/share/groff_font/devdvi/texex.map
OLD_FILES+=usr/share/groff_font/devdvi/texi.map
OLD_FILES+=usr/share/groff_font/devdvi/texmi.map
OLD_FILES+=usr/share/groff_font/devdvi/texr.map
OLD_FILES+=usr/share/groff_font/devdvi/texsy.map
OLD_FILES+=usr/share/groff_font/devdvi/textex.map
OLD_FILES+=usr/share/groff_font/devdvi/textt.map
OLD_DIRS+=usr/share/groff_font/devdvi
OLD_FILES+=usr/share/groff_font/devhtml/B
OLD_FILES+=usr/share/groff_font/devhtml/BI
OLD_FILES+=usr/share/groff_font/devhtml/CB
OLD_FILES+=usr/share/groff_font/devhtml/CBI
OLD_FILES+=usr/share/groff_font/devhtml/CI
OLD_FILES+=usr/share/groff_font/devhtml/CR
OLD_FILES+=usr/share/groff_font/devhtml/DESC
OLD_FILES+=usr/share/groff_font/devhtml/I
OLD_FILES+=usr/share/groff_font/devhtml/R
OLD_FILES+=usr/share/groff_font/devhtml/S
OLD_DIRS+=usr/share/groff_font/devhtml
OLD_FILES+=usr/share/groff_font/devkoi8-r/B
OLD_FILES+=usr/share/groff_font/devkoi8-r/BI
OLD_FILES+=usr/share/groff_font/devkoi8-r/CW
OLD_FILES+=usr/share/groff_font/devkoi8-r/DESC
OLD_FILES+=usr/share/groff_font/devkoi8-r/I
OLD_FILES+=usr/share/groff_font/devkoi8-r/L
OLD_FILES+=usr/share/groff_font/devkoi8-r/R
OLD_FILES+=usr/share/groff_font/devkoi8-r/S
OLD_DIRS+=usr/share/groff_font/devkoi8-r
OLD_FILES+=usr/share/groff_font/devlatin1/B
OLD_FILES+=usr/share/groff_font/devlatin1/BI
OLD_FILES+=usr/share/groff_font/devlatin1/CW
OLD_FILES+=usr/share/groff_font/devlatin1/DESC
OLD_FILES+=usr/share/groff_font/devlatin1/I
OLD_FILES+=usr/share/groff_font/devlatin1/L
OLD_FILES+=usr/share/groff_font/devlatin1/R
OLD_FILES+=usr/share/groff_font/devlatin1/S
OLD_DIRS+=usr/share/groff_font/devlatin1
OLD_FILES+=usr/share/groff_font/devlbp/CB
OLD_FILES+=usr/share/groff_font/devlbp/CI
OLD_FILES+=usr/share/groff_font/devlbp/CR
OLD_FILES+=usr/share/groff_font/devlbp/DESC
OLD_FILES+=usr/share/groff_font/devlbp/EB
OLD_FILES+=usr/share/groff_font/devlbp/EI
OLD_FILES+=usr/share/groff_font/devlbp/ER
OLD_FILES+=usr/share/groff_font/devlbp/HB
OLD_FILES+=usr/share/groff_font/devlbp/HBI
OLD_FILES+=usr/share/groff_font/devlbp/HI
OLD_FILES+=usr/share/groff_font/devlbp/HNB
OLD_FILES+=usr/share/groff_font/devlbp/HNBI
OLD_FILES+=usr/share/groff_font/devlbp/HNI
OLD_FILES+=usr/share/groff_font/devlbp/HNR
OLD_FILES+=usr/share/groff_font/devlbp/HR
OLD_FILES+=usr/share/groff_font/devlbp/TB
OLD_FILES+=usr/share/groff_font/devlbp/TBI
OLD_FILES+=usr/share/groff_font/devlbp/TI
OLD_FILES+=usr/share/groff_font/devlbp/TR
OLD_DIRS+=usr/share/groff_font/devlbp
OLD_FILES+=usr/share/groff_font/devlj4/AB
OLD_FILES+=usr/share/groff_font/devlj4/ABI
OLD_FILES+=usr/share/groff_font/devlj4/AI
OLD_FILES+=usr/share/groff_font/devlj4/ALBB
OLD_FILES+=usr/share/groff_font/devlj4/ALBR
OLD_FILES+=usr/share/groff_font/devlj4/AOB
OLD_FILES+=usr/share/groff_font/devlj4/AOI
OLD_FILES+=usr/share/groff_font/devlj4/AOR
OLD_FILES+=usr/share/groff_font/devlj4/AR
OLD_FILES+=usr/share/groff_font/devlj4/CB
OLD_FILES+=usr/share/groff_font/devlj4/CBI
OLD_FILES+=usr/share/groff_font/devlj4/CI
OLD_FILES+=usr/share/groff_font/devlj4/CLARENDON
OLD_FILES+=usr/share/groff_font/devlj4/CORONET
OLD_FILES+=usr/share/groff_font/devlj4/CR
OLD_FILES+=usr/share/groff_font/devlj4/DESC
OLD_FILES+=usr/share/groff_font/devlj4/GB
OLD_FILES+=usr/share/groff_font/devlj4/GBI
OLD_FILES+=usr/share/groff_font/devlj4/GI
OLD_FILES+=usr/share/groff_font/devlj4/GR
OLD_FILES+=usr/share/groff_font/devlj4/LGB
OLD_FILES+=usr/share/groff_font/devlj4/LGI
OLD_FILES+=usr/share/groff_font/devlj4/LGR
OLD_FILES+=usr/share/groff_font/devlj4/MARIGOLD
OLD_FILES+=usr/share/groff_font/devlj4/OB
OLD_FILES+=usr/share/groff_font/devlj4/OBI
OLD_FILES+=usr/share/groff_font/devlj4/OI
OLD_FILES+=usr/share/groff_font/devlj4/OR
OLD_FILES+=usr/share/groff_font/devlj4/S
OLD_FILES+=usr/share/groff_font/devlj4/SYMBOL
OLD_FILES+=usr/share/groff_font/devlj4/TB
OLD_FILES+=usr/share/groff_font/devlj4/TBI
OLD_FILES+=usr/share/groff_font/devlj4/TI
OLD_FILES+=usr/share/groff_font/devlj4/TNRB
OLD_FILES+=usr/share/groff_font/devlj4/TNRBI
OLD_FILES+=usr/share/groff_font/devlj4/TNRI
OLD_FILES+=usr/share/groff_font/devlj4/TNRR
OLD_FILES+=usr/share/groff_font/devlj4/TR
OLD_FILES+=usr/share/groff_font/devlj4/UB
OLD_FILES+=usr/share/groff_font/devlj4/UBI
OLD_FILES+=usr/share/groff_font/devlj4/UCB
OLD_FILES+=usr/share/groff_font/devlj4/UCBI
OLD_FILES+=usr/share/groff_font/devlj4/UCI
OLD_FILES+=usr/share/groff_font/devlj4/UCR
OLD_FILES+=usr/share/groff_font/devlj4/UI
OLD_FILES+=usr/share/groff_font/devlj4/UR
OLD_FILES+=usr/share/groff_font/devlj4/WINGDINGS
OLD_DIRS+=usr/share/groff_font/devlj4
OLD_FILES+=usr/share/groff_font/devps/AB
OLD_FILES+=usr/share/groff_font/devps/ABI
OLD_FILES+=usr/share/groff_font/devps/AI
OLD_FILES+=usr/share/groff_font/devps/AR
OLD_FILES+=usr/share/groff_font/devps/BMB
OLD_FILES+=usr/share/groff_font/devps/BMBI
OLD_FILES+=usr/share/groff_font/devps/BMI
OLD_FILES+=usr/share/groff_font/devps/BMR
OLD_FILES+=usr/share/groff_font/devps/CB
OLD_FILES+=usr/share/groff_font/devps/CBI
OLD_FILES+=usr/share/groff_font/devps/CI
OLD_FILES+=usr/share/groff_font/devps/CR
OLD_FILES+=usr/share/groff_font/devps/DESC
OLD_FILES+=usr/share/groff_font/devps/EURO
OLD_FILES+=usr/share/groff_font/devps/HB
OLD_FILES+=usr/share/groff_font/devps/HBI
OLD_FILES+=usr/share/groff_font/devps/HI
OLD_FILES+=usr/share/groff_font/devps/HNB
OLD_FILES+=usr/share/groff_font/devps/HNBI
OLD_FILES+=usr/share/groff_font/devps/HNI
OLD_FILES+=usr/share/groff_font/devps/HNR
OLD_FILES+=usr/share/groff_font/devps/HR
OLD_FILES+=usr/share/groff_font/devps/Makefile
OLD_FILES+=usr/share/groff_font/devps/NB
OLD_FILES+=usr/share/groff_font/devps/NBI
OLD_FILES+=usr/share/groff_font/devps/NI
OLD_FILES+=usr/share/groff_font/devps/NR
OLD_FILES+=usr/share/groff_font/devps/PB
OLD_FILES+=usr/share/groff_font/devps/PBI
OLD_FILES+=usr/share/groff_font/devps/PI
OLD_FILES+=usr/share/groff_font/devps/PR
OLD_FILES+=usr/share/groff_font/devps/S
OLD_FILES+=usr/share/groff_font/devps/SS
OLD_FILES+=usr/share/groff_font/devps/TB
OLD_FILES+=usr/share/groff_font/devps/TBI
OLD_FILES+=usr/share/groff_font/devps/TI
OLD_FILES+=usr/share/groff_font/devps/TR
OLD_FILES+=usr/share/groff_font/devps/ZCMI
OLD_FILES+=usr/share/groff_font/devps/ZD
OLD_FILES+=usr/share/groff_font/devps/ZDR
OLD_FILES+=usr/share/groff_font/devps/afmname
OLD_FILES+=usr/share/groff_font/devps/dingbats.map
OLD_FILES+=usr/share/groff_font/devps/dingbats.rmap
OLD_FILES+=usr/share/groff_font/devps/download
OLD_FILES+=usr/share/groff_font/devps/freeeuro.pfa
OLD_FILES+=usr/share/groff_font/devps/lgreekmap
OLD_FILES+=usr/share/groff_font/devps/prologue
OLD_FILES+=usr/share/groff_font/devps/symbol.sed
OLD_FILES+=usr/share/groff_font/devps/symbolchars
OLD_FILES+=usr/share/groff_font/devps/symbolsl.afm
OLD_FILES+=usr/share/groff_font/devps/symbolsl.pfa
OLD_FILES+=usr/share/groff_font/devps/text.enc
OLD_FILES+=usr/share/groff_font/devps/textmap
OLD_FILES+=usr/share/groff_font/devps/zapfdr.pfa
OLD_DIRS+=usr/share/groff_font/devps
OLD_FILES+=usr/share/groff_font/devutf8/B
OLD_FILES+=usr/share/groff_font/devutf8/BI
OLD_FILES+=usr/share/groff_font/devutf8/CW
OLD_FILES+=usr/share/groff_font/devutf8/DESC
OLD_FILES+=usr/share/groff_font/devutf8/I
OLD_FILES+=usr/share/groff_font/devutf8/L
OLD_FILES+=usr/share/groff_font/devutf8/R
OLD_FILES+=usr/share/groff_font/devutf8/S
OLD_DIRS+=usr/share/groff_font/devutf8
OLD_DIRS+=usr/share/groff_font
OLD_FILES+=usr/share/man/man1/addftinfo.1.gz
OLD_FILES+=usr/share/man/man1/afmtodit.1.gz
OLD_FILES+=usr/share/man/man1/checknr.1.gz
OLD_FILES+=usr/share/man/man1/colcrt.1.gz
OLD_FILES+=usr/share/man/man1/eqn.1.gz
OLD_FILES+=usr/share/man/man1/grn.1.gz
OLD_FILES+=usr/share/man/man1/grodvi.1.gz
OLD_FILES+=usr/share/man/man1/groff.1.gz
OLD_FILES+=usr/share/man/man1/grog.1.gz
OLD_FILES+=usr/share/man/man1/grolbp.1.gz
OLD_FILES+=usr/share/man/man1/grolj4.1.gz
OLD_FILES+=usr/share/man/man1/grops.1.gz
OLD_FILES+=usr/share/man/man1/grotty.1.gz
OLD_FILES+=usr/share/man/man1/hpftodit.1.gz
OLD_FILES+=usr/share/man/man1/indxbib.1.gz
OLD_FILES+=usr/share/man/man1/lkbib.1.gz
OLD_FILES+=usr/share/man/man1/lookbib.1.gz
OLD_FILES+=usr/share/man/man1/mmroff.1.gz
OLD_FILES+=usr/share/man/man1/neqn.1.gz
OLD_FILES+=usr/share/man/man1/nroff.1.gz
OLD_FILES+=usr/share/man/man1/pfbtops.1.gz
OLD_FILES+=usr/share/man/man1/pic.1.gz
OLD_FILES+=usr/share/man/man1/psroff.1.gz
OLD_FILES+=usr/share/man/man1/refer.1.gz
OLD_FILES+=usr/share/man/man1/tbl.1.gz
OLD_FILES+=usr/share/man/man1/tfmtodit.1.gz
OLD_FILES+=usr/share/man/man1/troff.1.gz
OLD_FILES+=usr/share/man/man1/vgrind.1.gz
OLD_FILES+=usr/share/man/man5/groff_font.5.gz
OLD_FILES+=usr/share/man/man5/groff_out.5.gz
OLD_FILES+=usr/share/man/man5/groff_tmac.5.gz
OLD_FILES+=usr/share/man/man5/lj4_font.5.gz
OLD_FILES+=usr/share/man/man5/tmac.5.gz
OLD_FILES+=usr/share/man/man5/vgrindefs.5.gz
OLD_FILES+=usr/share/man/man7/ditroff.7.gz
OLD_FILES+=usr/share/man/man7/groff.7.gz
OLD_FILES+=usr/share/man/man7/groff_char.7.gz
OLD_FILES+=usr/share/man/man7/groff_diff.7.gz
OLD_FILES+=usr/share/man/man7/groff_man.7.gz
OLD_FILES+=usr/share/man/man7/groff_mdoc.7.gz
OLD_FILES+=usr/share/man/man7/groff_me.7.gz
OLD_FILES+=usr/share/man/man7/groff_mm.7.gz
OLD_FILES+=usr/share/man/man7/groff_mmse.7.gz
OLD_FILES+=usr/share/man/man7/groff_ms.7.gz
OLD_FILES+=usr/share/man/man7/groff_trace.7.gz
OLD_FILES+=usr/share/man/man7/groff_www.7.gz
OLD_FILES+=usr/share/man/man7/mdoc.samples.7.gz
OLD_FILES+=usr/share/man/man7/me.7.gz
OLD_FILES+=usr/share/man/man7/mm.7.gz
OLD_FILES+=usr/share/man/man7/mmse.7.gz
OLD_FILES+=usr/share/man/man7/ms.7.gz
OLD_FILES+=usr/share/man/man7/orig_me.7.gz
OLD_FILES+=usr/share/me/acm.me
OLD_FILES+=usr/share/me/chars.me
OLD_FILES+=usr/share/me/deltext.me
OLD_FILES+=usr/share/me/eqn.me
OLD_FILES+=usr/share/me/float.me
OLD_FILES+=usr/share/me/footnote.me
OLD_FILES+=usr/share/me/index.me
OLD_FILES+=usr/share/me/letterhead.me
OLD_FILES+=usr/share/me/local.me
OLD_FILES+=usr/share/me/null.me
OLD_FILES+=usr/share/me/refer.me
OLD_FILES+=usr/share/me/revisions
OLD_FILES+=usr/share/me/sh.me
OLD_FILES+=usr/share/me/tbl.me
OLD_FILES+=usr/share/me/thesis.me
OLD_DIRS+=usr/share/me
OLD_FILES+=usr/share/misc/vgrindefs
OLD_FILES+=usr/share/misc/vgrindefs.db
OLD_FILES+=usr/share/tmac/X.tmac
OLD_FILES+=usr/share/tmac/Xps.tmac
OLD_FILES+=usr/share/tmac/a4.tmac
OLD_FILES+=usr/share/tmac/an-old.tmac
OLD_FILES+=usr/share/tmac/an.tmac
OLD_FILES+=usr/share/tmac/andoc.tmac
OLD_FILES+=usr/share/tmac/composite.tmac
OLD_FILES+=usr/share/tmac/cp1047.tmac
OLD_FILES+=usr/share/tmac/devtag.tmac
OLD_FILES+=usr/share/tmac/doc.tmac
OLD_FILES+=usr/share/tmac/dvi.tmac
OLD_FILES+=usr/share/tmac/e.tmac
OLD_FILES+=usr/share/tmac/ec.tmac
OLD_FILES+=usr/share/tmac/eqnrc
OLD_FILES+=usr/share/tmac/europs.tmac
OLD_FILES+=usr/share/tmac/html-end.tmac
OLD_FILES+=usr/share/tmac/html.tmac
OLD_FILES+=usr/share/tmac/hyphen.ru
OLD_FILES+=usr/share/tmac/hyphen.us
OLD_FILES+=usr/share/tmac/hyphenex.us
OLD_FILES+=usr/share/tmac/koi8-r.tmac
OLD_FILES+=usr/share/tmac/latin1.tmac
OLD_FILES+=usr/share/tmac/latin2.tmac
OLD_FILES+=usr/share/tmac/latin9.tmac
OLD_FILES+=usr/share/tmac/lbp.tmac
OLD_FILES+=usr/share/tmac/lj4.tmac
OLD_FILES+=usr/share/tmac/m.tmac
OLD_FILES+=usr/share/tmac/man.local
OLD_FILES+=usr/share/tmac/man.tmac
OLD_FILES+=usr/share/tmac/mandoc.tmac
OLD_FILES+=usr/share/tmac/mdoc.local
OLD_FILES+=usr/share/tmac/mdoc.tmac
OLD_FILES+=usr/share/tmac/mdoc/doc-common
OLD_FILES+=usr/share/tmac/mdoc/doc-ditroff
OLD_FILES+=usr/share/tmac/mdoc/doc-nroff
OLD_FILES+=usr/share/tmac/mdoc/doc-syms
OLD_FILES+=usr/share/tmac/mdoc/fr.ISO8859-1
OLD_FILES+=usr/share/tmac/mdoc/ru.KOI8-R
OLD_DIRS+=usr/share/tmac/mdoc
OLD_FILES+=usr/share/tmac/me.tmac
OLD_FILES+=usr/share/tmac/mm/0.MT
OLD_FILES+=usr/share/tmac/mm/4.MT
OLD_FILES+=usr/share/tmac/mm/5.MT
OLD_FILES+=usr/share/tmac/mm/locale
OLD_FILES+=usr/share/tmac/mm/mm.tmac
OLD_FILES+=usr/share/tmac/mm/mmse.tmac
OLD_FILES+=usr/share/tmac/mm/ms.cov
OLD_FILES+=usr/share/tmac/mm/se_locale
OLD_FILES+=usr/share/tmac/mm/se_ms.cov
OLD_DIRS+=usr/share/tmac/mm
OLD_FILES+=usr/share/tmac/ms.tmac
OLD_FILES+=usr/share/tmac/mse.tmac
OLD_FILES+=usr/share/tmac/papersize.tmac
OLD_FILES+=usr/share/tmac/pic.tmac
OLD_FILES+=usr/share/tmac/ps.tmac
OLD_FILES+=usr/share/tmac/psatk.tmac
OLD_FILES+=usr/share/tmac/psold.tmac
OLD_FILES+=usr/share/tmac/pspic.tmac
OLD_FILES+=usr/share/tmac/s.tmac
OLD_FILES+=usr/share/tmac/safer.tmac
OLD_FILES+=usr/share/tmac/tmac.orig_me
OLD_FILES+=usr/share/tmac/tmac.vgrind
OLD_FILES+=usr/share/tmac/trace.tmac
OLD_FILES+=usr/share/tmac/troffrc
OLD_FILES+=usr/share/tmac/troffrc-end
OLD_FILES+=usr/share/tmac/tty-char.tmac
OLD_FILES+=usr/share/tmac/tty.tmac
OLD_FILES+=usr/share/tmac/unicode.tmac
OLD_FILES+=usr/share/tmac/www.tmac
OLD_DIRS+=usr/share/tmac
# 20170607: remove incorrect atf_check(1) manpage link
OLD_FILES+=usr/share/man/man1/atf_check.1.gz
# 20170601: remove stale manpage
OLD_FILES+=usr/share/man/man2/cap_rights_get.2.gz
# 20170601: old libifconfig and libifc
OLD_FILES+=usr/lib/libifc.a
OLD_FILES+=usr/lib/libifc_p.a
OLD_FILES+=usr/lib/libifconfig.a
OLD_FILES+=usr/lib/libifconfig_p.a
# 20170529: mount.conf(8) -> mount.conf(5)
OLD_FILES+=usr/share/man/man8/mount.conf.8.gz
# 20170525: remove misleading template
OLD_FILES+=usr/share/misc/man.template
# 20170525: disconnect the roff docs from the build
OLD_FILES+=usr/share/doc/papers/beyond43.ascii.gz
OLD_FILES+=usr/share/doc/papers/bio.ascii.gz
OLD_FILES+=usr/share/doc/papers/contents.ascii.gz
OLD_FILES+=usr/share/doc/papers/devfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/diskperf.ascii.gz
OLD_FILES+=usr/share/doc/papers/fsinterface.ascii.gz
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
OLD_FILES+=usr/share/doc/papers/jail.ascii.gz
OLD_FILES+=usr/share/doc/papers/kernmalloc.ascii.gz
OLD_FILES+=usr/share/doc/papers/kerntune.ascii.gz
OLD_FILES+=usr/share/doc/papers/malloc.ascii.gz
OLD_FILES+=usr/share/doc/papers/newvm.ascii.gz
OLD_FILES+=usr/share/doc/papers/releng.ascii.gz
OLD_FILES+=usr/share/doc/papers/sysperf.ascii.gz
OLD_FILES+=usr/share/doc/papers/timecounter.ascii.gz
OLD_DIRS+=usr/share/doc/papers
OLD_FILES+=usr/share/doc/psd/01.cacm/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/01.cacm
OLD_FILES+=usr/share/doc/psd/02.implement/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/02.implement
OLD_FILES+=usr/share/doc/psd/03.iosys/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/03.iosys
OLD_FILES+=usr/share/doc/psd/04.uprog/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/04.uprog
OLD_FILES+=usr/share/doc/psd/05.sysman/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/05.sysman
OLD_FILES+=usr/share/doc/psd/06.Clang/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/06.Clang
OLD_FILES+=usr/share/doc/psd/12.make/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/12.make
OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz
OLD_DIRS+=usr/share/doc/psd/13.rcs
OLD_FILES+=usr/share/doc/psd/15.yacc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/15.yacc
OLD_FILES+=usr/share/doc/psd/16.lex/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/16.lex
OLD_FILES+=usr/share/doc/psd/17.m4/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/17.m4
OLD_FILES+=usr/share/doc/psd/18.gprof/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/18.gprof
OLD_FILES+=usr/share/doc/psd/20.ipctut/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/20.ipctut
OLD_FILES+=usr/share/doc/psd/21.ipc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/21.ipc
OLD_FILES+=usr/share/doc/psd/22.rpcgen/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/22.rpcgen
OLD_FILES+=usr/share/doc/psd/23.rpc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/23.rpc
OLD_FILES+=usr/share/doc/psd/24.xdr/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/24.xdr
OLD_FILES+=usr/share/doc/psd/25.xdrrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/25.xdrrfc
OLD_FILES+=usr/share/doc/psd/26.rpcrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/26.rpcrfc
OLD_FILES+=usr/share/doc/psd/27.nfsrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/27.nfsrfc
OLD_FILES+=usr/share/doc/psd/Title.ascii.gz
OLD_FILES+=usr/share/doc/psd/contents.ascii.gz
OLD_DIRS+=usr/share/doc/psd
OLD_FILES+=usr/share/doc/smm/01.setup/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/01.setup
OLD_FILES+=usr/share/doc/smm/02.config/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/02.config
OLD_FILES+=usr/share/doc/smm/03.fsck/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/03.fsck
OLD_FILES+=usr/share/doc/smm/04.quotas/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/04.quotas
OLD_FILES+=usr/share/doc/smm/05.fastfs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/05.fastfs
OLD_FILES+=usr/share/doc/smm/06.nfs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/06.nfs
OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/07.lpd
OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/08.sendmailop
OLD_FILES+=usr/share/doc/smm/11.timedop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/11.timedop
OLD_FILES+=usr/share/doc/smm/12.timed/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/12.timed
OLD_FILES+=usr/share/doc/smm/18.net/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/18.net
OLD_FILES+=usr/share/doc/smm/Title.ascii.gz
OLD_FILES+=usr/share/doc/smm/contents.ascii.gz
OLD_DIRS+=usr/share/doc/smm
OLD_FILES+=usr/share/doc/usd/04.csh/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/04.csh
OLD_FILES+=usr/share/doc/usd/05.dc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/05.dc
OLD_FILES+=usr/share/doc/usd/06.bc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/06.bc
OLD_FILES+=usr/share/doc/usd/07.mail/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/07.mail
OLD_FILES+=usr/share/doc/usd/10.exref/paper.ascii.gz
OLD_FILES+=usr/share/doc/usd/10.exref/summary.ascii.gz
OLD_DIRS+=usr/share/doc/usd/10.exref
OLD_FILES+=usr/share/doc/usd/11.edit/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/11.edit
OLD_FILES+=usr/share/doc/usd/12.vi/paper.ascii.gz
OLD_FILES+=usr/share/doc/usd/12.vi/summary.ascii.gz
OLD_FILES+=usr/share/doc/usd/12.vi/viapwh.ascii.gz
OLD_DIRS+=usr/share/doc/usd/12.vi
OLD_FILES+=usr/share/doc/usd/13.viref/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/13.viref
OLD_FILES+=usr/share/doc/usd/18.msdiffs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/18.msdiffs
OLD_FILES+=usr/share/doc/usd/19.memacros/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/19.memacros
OLD_FILES+=usr/share/doc/usd/20.meref/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/20.meref
OLD_FILES+=usr/share/doc/usd/21.troff/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/21.troff
OLD_FILES+=usr/share/doc/usd/22.trofftut/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/22.trofftut
OLD_FILES+=usr/share/doc/usd/Title.ascii.gz
OLD_FILES+=usr/share/doc/usd/contents.ascii.gz
OLD_DIRS+=usr/share/doc/usd
# 20170523: 64-bit inode support, library version bumps
OLD_LIBS+=lib/libzfs.so.2
OLD_LIBS+=usr/lib/libarchive.so.6
OLD_LIBS+=usr/lib/libmilter.so.5
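# Superseded shared libraries such as these belong in OLD_LIBS rather than
# OLD_FILES: they are removed only by the separate delete-old-libs target,
# so an administrator can defer their removal until third-party binaries
# that still link against the old library versions have been rebuilt.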
# 20170427: NATM configuration support removed
OLD_FILES+=etc/rc.d/atm1
OLD_FILES+=etc/rc.d/atm2
OLD_FILES+=etc/rc.d/atm3
OLD_FILES+=usr/share/man/man8/rc.atm.8.gz
# 20170426: UMA_ZONE_REFCNT removed
OLD_FILES+=usr/share/man/man9/uma_find_refcnt.9.gz
# 20170424: NATM support removed
OLD_FILES+=rescue/atmconfig
OLD_FILES+=sbin/atmconfig
OLD_FILES+=usr/include/bsnmp/snmp_atm.h
OLD_FILES+=usr/include/dev/utopia/idtphy.h
OLD_FILES+=usr/include/dev/utopia/suni.h
OLD_FILES+=usr/include/dev/utopia/utopia.h
OLD_FILES+=usr/include/dev/utopia/utopia_priv.h
OLD_DIRS+=usr/include/dev/utopia
OLD_FILES+=usr/include/net/if_atm.h
OLD_FILES+=usr/include/netgraph/atm/ng_atm.h
OLD_FILES+=usr/include/netinet/if_atm.h
OLD_FILES+=usr/include/netnatm/natm.h
OLD_FILES+=usr/lib/snmp_atm.so
OLD_LIBS+=usr/lib/snmp_atm.so.6
OLD_FILES+=usr/share/doc/atm/atmconfig.help
OLD_FILES+=usr/share/doc/atm/atmconfig_device.help
OLD_DIRS+=usr/share/doc/atm
OLD_FILES+=usr/share/man/man3/snmp_atm.3.gz
OLD_FILES+=usr/share/man/man4/en.4.gz
OLD_FILES+=usr/share/man/man4/fatm.4.gz
OLD_FILES+=usr/share/man/man4/hatm.4.gz
OLD_FILES+=usr/share/man/man4/if_en.4.gz
OLD_FILES+=usr/share/man/man4/if_fatm.4.gz
OLD_FILES+=usr/share/man/man4/if_hatm.4.gz
OLD_FILES+=usr/share/man/man4/if_patm.4.gz
OLD_FILES+=usr/share/man/man4/natm.4.gz
OLD_FILES+=usr/share/man/man4/natmip.4.gz
OLD_FILES+=usr/share/man/man4/ng_atm.4.gz
OLD_FILES+=usr/share/man/man4/patm.4.gz
OLD_FILES+=usr/share/man/man4/utopia.4.gz
OLD_FILES+=usr/share/man/man8/atmconfig.8.gz
OLD_FILES+=usr/share/man/man9/utopia.9.gz
OLD_FILES+=usr/share/snmp/defs/atm_freebsd.def
OLD_FILES+=usr/share/snmp/defs/atm_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt
# 20170420: remove GNU diff
OLD_FILES+=usr/share/man/man7/diff.7.gz
# 20170322: rename <x> to <x>_test to match the FreeBSD test suite name scheme
OLD_FILES+=usr/tests/usr.bin/col/col
OLD_FILES+=usr/tests/usr.bin/diff/diff
OLD_FILES+=usr/tests/usr.bin/ident/ident
OLD_FILES+=usr/tests/usr.bin/mkimg/mkimg
OLD_FILES+=usr/tests/usr.bin/sdiff/sdiff
OLD_FILES+=usr/tests/usr.bin/soelim/soelim
OLD_FILES+=usr/tests/usr.sbin/pw/pw_config
OLD_FILES+=usr/tests/usr.sbin/pw/pw_etcdir
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupadd
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupdel
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupmod
OLD_FILES+=usr/tests/usr.sbin/pw/pw_lock
OLD_FILES+=usr/tests/usr.sbin/pw/pw_useradd
OLD_FILES+=usr/tests/usr.sbin/pw/pw_userdel
OLD_FILES+=usr/tests/usr.sbin/pw/pw_usermod
OLD_FILES+=usr/tests/usr.sbin/pw/pw_usernext
# 20170319: io_test requires zh_TW.Big5 locale
OLD_FILES+=usr/tests/lib/libc/locale/io_test
# 20170319: remove nls for unsupported Big5* locales
OLD_DIRS+=usr/share/nls/zh_HK.Big5HKSCS
OLD_DIRS+=usr/share/nls/zh_TW.Big5
# 20170313: move .../sys/geom/eli/... to .../sys/geom/class/eli/...
OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/pbkdf2
OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/Kyuafile
OLD_FILES+=usr/tests/sys/geom/eli/Kyuafile
OLD_DIRS+=usr/tests/sys/geom/eli/pbkdf2
OLD_DIRS+=usr/tests/sys/geom/eli
# 20170313: sbin/ipftest and ipresend temporarily disconnected
OLD_FILES+=sbin/ipftest
OLD_FILES+=sbin/ipresend
OLD_FILES+=usr/share/man/man1/ipftest.1.gz
OLD_FILES+=usr/share/man/man1/ipresend.1.gz
# 20170311: Remove WITHOUT_MANDOCDB option
OLD_FILES+=usr/share/man/man1/makewhatis.1.gz
# 20170308: rename some tests
OLD_FILES+=usr/tests/bin/pwait/pwait
OLD_FILES+=usr/tests/usr.bin/timeout/timeout
# 20170307: remove pcap-int.h
OLD_FILES+=usr/include/pcap-int.h
# 20170302: new libc++ import which bumps version from 3.9.1 to 4.0.0
OLD_FILES+=usr/include/c++/v1/__undef___deallocate
OLD_FILES+=usr/include/c++/v1/tr1/__undef___deallocate
# 20170302: new clang import which bumps version from 3.9.1 to 4.0.0
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.1/include/msa.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.1/include
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.1/lib
OLD_DIRS+=usr/lib/clang/3.9.1
# 20170226: SVR4 compatibility removed
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/streams.4.gz
OLD_FILES+=usr/share/man/man4/svr4.4.gz
.endif
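# Entries guarded by a TARGET_ARCH conditional, as above, are evaluated per
# build, so the obsolete-file list only names files the target architecture
# would actually have installed.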
# 20170219: OpenPAM RADULA upgrade removed the libpam tests
OLD_FILES+=usr/tests/lib/libpam/Kyuafile
OLD_FILES+=usr/tests/lib/libpam/t_openpam_ctype
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readlinev
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readword
OLD_DIRS+=usr/tests/lib/libpam
# 20170216: remove ahb(4)
OLD_FILES+=usr/share/man/man4/ahb.4.gz
# 20170216: remove fea(4)
OLD_FILES+=usr/share/man/man4/fea.4.gz
# 20170206: remove bdes(1)
OLD_FILES+=usr/bin/bdes
OLD_FILES+=usr/share/man/man1/bdes.1.gz
# 20170206: merged projects/ipsec
OLD_FILES+=usr/include/netinet/ip_ipsec.h
OLD_FILES+=usr/include/netinet6/ip6_ipsec.h
# 20170128: remove pc98 support
OLD_FILES+=usr/include/dev/ic/i8251.h
OLD_FILES+=usr/include/dev/ic/i8255.h
OLD_FILES+=usr/include/dev/ic/rsa.h
OLD_FILES+=usr/include/dev/ic/wd33c93reg.h
OLD_FILES+=usr/include/sys/disk/pc98.h
OLD_FILES+=usr/include/sys/diskpc98.h
OLD_FILES+=usr/share/man/man4/i386/ct.4.gz
OLD_FILES+=usr/share/man/man4/i386/snc.4.gz
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.kbd
# 20170110: Four files from ggate tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/gate/1_test
OLD_FILES+=usr/tests/sys/geom/class/gate/2_test
OLD_FILES+=usr/tests/sys/geom/class/gate/3_test
OLD_FILES+=usr/tests/sys/geom/class/gate/conf.sh
# 20170103: libbsnmptools.so made into an INTERNALLIB
OLD_FILES+=usr/lib/libbsnmptools.a
OLD_FILES+=usr/lib/libbsnmptools_p.a
OLD_LIBS+=usr/lib/libbsnmptools.so.0
OLD_FILES+=usr/lib/libbsnmptools.so
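# For reference, this is roughly what converting a library to an INTERNALLIB
# means for a bsd.lib.mk consumer (hypothetical fragment, not part of this
# Makefile): only a build-time static archive is produced and nothing is
# installed, which is why the previously installed static, profiled, and
# shared variants listed above went stale.
#	LIB=		bsnmptools
#	INTERNALLIB=
#	.include <bsd.lib.mk>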
# 20170102: sysdecode_getfsstat_flags() renamed to sysdecode_getfsstat_mode()
OLD_FILES+=usr/share/man/man3/sysdecode_getfsstat_flags.3.gz
# 20161230: libarchive ACL pax test renamed to test_acl_pax_posix1e.tar.uu
OLD_FILES+=usr/tests/lib/libarchive/test_acl_pax.tar.uu
# 20161229: Three files from gnop tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/nop/1_test
OLD_FILES+=usr/tests/sys/geom/class/nop/2_test
OLD_FILES+=usr/tests/sys/geom/class/nop/conf.sh
# 20161217: new clang import which bumps version from 3.9.0 to 3.9.1
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.0/include
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.0/lib
OLD_DIRS+=usr/lib/clang/3.9.0
# 20161205: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.3
# 20161127: Remove vm_page_cache(9)
OLD_FILES+=usr/share/man/man9/vm_page_cache.9.gz
# 20161124: new clang import which bumps version from 3.8.0 to 3.9.0
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.8.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.8.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.8.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.8.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.8.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.8.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.8.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.8.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.8.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.8.0/include
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.8.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.8.0/lib
OLD_DIRS+=usr/lib/clang/3.8.0
# 20161121: Hyper-V manuals only apply to amd64 and i386
.if ${TARGET_ARCH} != "amd64" && ${TARGET_ARCH} != "i386"
OLD_FILES+=usr/share/man/man4/hv_kvp.4.gz
OLD_FILES+=usr/share/man/man4/hv_netvsc.4.gz
OLD_FILES+=usr/share/man/man4/hv_storvsc.4.gz
OLD_FILES+=usr/share/man/man4/hv_utils.4.gz
OLD_FILES+=usr/share/man/man4/hv_vmbus.4.gz
OLD_FILES+=usr/share/man/man4/hv_vss.4.gz
.endif
# 20161118: Remove hv_ata_pci_disengage(4)
OLD_FILES+=usr/share/man/man4/hv_ata_pci_disengage.4.gz
# 20161017: urtwn(4) was merged into rtwn(4)
OLD_FILES+=usr/share/man/man4/if_urtwn.4.gz
OLD_FILES+=usr/share/man/man4/urtwn.4.gz
OLD_FILES+=usr/share/man/man4/urtwnfw.4.gz
# 20161015: Remove GNU rcs
OLD_FILES+=usr/bin/ci
OLD_FILES+=usr/bin/co
OLD_FILES+=usr/bin/merge
OLD_FILES+=usr/bin/rcs
OLD_FILES+=usr/bin/rcsclean
OLD_FILES+=usr/bin/rcsdiff
OLD_FILES+=usr/bin/rcsfreeze
OLD_FILES+=usr/bin/rcsmerge
OLD_FILES+=usr/bin/rlog
OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz
OLD_DIRS+=usr/share/doc/psd/13.rcs
OLD_FILES+=usr/share/man/man1/ci.1.gz
OLD_FILES+=usr/share/man/man1/co.1.gz
OLD_FILES+=usr/share/man/man1/merge.1.gz
OLD_FILES+=usr/share/man/man1/rcs.1.gz
OLD_FILES+=usr/share/man/man1/rcsclean.1.gz
OLD_FILES+=usr/share/man/man1/rcsdiff.1.gz
OLD_FILES+=usr/share/man/man1/rcsfreeze.1.gz
OLD_FILES+=usr/share/man/man1/rcsintro.1.gz
OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz
OLD_FILES+=usr/share/man/man1/rlog.1.gz
OLD_FILES+=usr/share/man/man5/rcsfile.5.gz
# 20161010: remove link to removed m_getclr(9) macro
OLD_FILES+=usr/share/man/man9/m_getclr.9.gz
# 20161003: MK_ELFCOPY_AS_OBJCOPY option retired
OLD_FILES+=usr/bin/elfcopy
OLD_FILES+=usr/share/man/man1/elfcopy.1.gz
# 20160906: libkqueue tests moved to /usr/tests/sys/kqueue/libkqueue
OLD_FILES+=usr/tests/sys/kqueue/kqtest
OLD_FILES+=usr/tests/sys/kqueue/kqueue_test
# 20160903: idle page zeroing support removed
OLD_FILES+=usr/share/man/man9/pmap_zero_idle.9.gz
# 20160901: Remove digi(4)
OLD_FILES+=usr/share/man/man4/digi.4.gz
# 20160819: Remove ie(4)
OLD_FILES+=usr/share/man/man4/i386/ie.4.gz
# 20160819: Remove spic(4)
OLD_FILES+=usr/share/man/man4/spic.4.gz
# 20160819: Remove wl(4) and wlconfig(8)
OLD_FILES+=usr/share/man/man4/i386/wl.4.gz
OLD_FILES+=usr/sbin/wlconfig
OLD_FILES+=usr/share/man/man8/i386/wlconfig.8.gz
# 20160819: Remove si(4) and sicontrol(8)
OLD_FILES+=usr/share/man/man4/si.4.gz
OLD_FILES+=usr/sbin/sicontrol
OLD_FILES+=usr/share/man/man8/sicontrol.8.gz
# 20160819: Remove scd(4)
OLD_FILES+=usr/share/man/man4/scd.4.gz
# 20160815: Remove mcd(4)
OLD_FILES+=usr/share/man/man4/mcd.4.gz
# 20160805: lockmgr_waiters(9) removed
OLD_FILES+=usr/share/man/man9/lockmgr_waiters.9.gz
# 20160703: POSIXify locales with variants
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_TW.UTF-8
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_HK.UTF-8
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Latn_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Latn_RS.ISO8859-2
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.ISO8859-5
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/mn_Cyrl_MN.UTF-8
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/kk_Cyrl_KZ.UTF-8
# 20160608: removed pam_verbose_error
OLD_LIBS+=usr/lib/libpam.so.5
OLD_LIBS+=usr/lib/pam_chroot.so.5
OLD_LIBS+=usr/lib/pam_deny.so.5
OLD_LIBS+=usr/lib/pam_echo.so.5
OLD_LIBS+=usr/lib/pam_exec.so.5
OLD_LIBS+=usr/lib/pam_ftpusers.so.5
OLD_LIBS+=usr/lib/pam_group.so.5
OLD_LIBS+=usr/lib/pam_guest.so.5
OLD_LIBS+=usr/lib/pam_krb5.so.5
OLD_LIBS+=usr/lib/pam_ksu.so.5
OLD_LIBS+=usr/lib/pam_lastlog.so.5
OLD_LIBS+=usr/lib/pam_login_access.so.5
OLD_LIBS+=usr/lib/pam_nologin.so.5
OLD_LIBS+=usr/lib/pam_opie.so.5
OLD_LIBS+=usr/lib/pam_opieaccess.so.5
OLD_LIBS+=usr/lib/pam_passwdqc.so.5
OLD_LIBS+=usr/lib/pam_permit.so.5
OLD_LIBS+=usr/lib/pam_radius.so.5
OLD_LIBS+=usr/lib/pam_rhosts.so.5
OLD_LIBS+=usr/lib/pam_rootok.so.5
OLD_LIBS+=usr/lib/pam_securetty.so.5
OLD_LIBS+=usr/lib/pam_self.so.5
OLD_LIBS+=usr/lib/pam_ssh.so.5
OLD_LIBS+=usr/lib/pam_tacplus.so.5
OLD_LIBS+=usr/lib/pam_unix.so.5
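# Note: OLD_FILES/OLD_DIRS entries in this list are consumed by the
# "check-old" and "delete-old" targets, while OLD_LIBS entries (such as
# the pam modules above) are handled by "check-old-libs" and
# "delete-old-libs", so stale shared libraries can be kept until
# everything still linked against them has been rebuilt. An
# illustrative cleanup sequence, run from the top of the source tree:
#   make check-old        # report stale files, directories and libraries
#   make delete-old       # remove stale files and directories
#   make delete-old-libs  # remove stale libraries once nothing uses them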
# 20160523: remove extraneous ALTQ files
OLD_FILES+=usr/include/altq/altq_codel.h
OLD_FILES+=usr/include/altq/altq_fairq.h
# 20160519: remove DTrace Toolkit from base
OLD_FILES+=usr/sbin/dtruss
OLD_FILES+=usr/share/dtrace/toolkit/execsnoop
OLD_FILES+=usr/share/dtrace/toolkit/hotkernel
OLD_FILES+=usr/share/dtrace/toolkit/hotuser
OLD_FILES+=usr/share/dtrace/toolkit/opensnoop
OLD_FILES+=usr/share/dtrace/toolkit/procsystime
OLD_DIRS+=usr/share/dtrace/toolkit
OLD_FILES+=usr/share/man/man1/dtruss.1.gz
# 20160519: stale MLINK removed
OLD_FILES+=usr/share/man/man9/rman_await_resource.9.gz
# 20160517: ReiserFS removed
OLD_FILES+=usr/share/man/man5/reiserfs.5.gz
# 20160504: tests rework
OLD_FILES+=usr/tests/lib/libc/regex/data/README
# 20160430: kvm_getfiles(3) removed from kvm(3)
OLD_LIBS+=lib/libkvm.so.6
OLD_FILES+=usr/share/man/man3/kvm_getfiles.3.gz
# 20160423: remove mroute6d
OLD_FILES+=etc/rc.d/mroute6d
# 20160419: rename units.lib -> definitions.units
OLD_FILES+=usr/share/misc/units.lib
# 20160419: remove Big5HKSCS locales
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_TIME
OLD_DIRS+=usr/share/locale/zh_HK.Big5HKSCS
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_HK.Big5HKSCS
# 20160317: rman_res_t size bump to uintmax_t
OLD_LIBS+=usr/lib/libdevinfo.so.5
# 20160305: new clang import which bumps version from 3.7.1 to 3.8.0
OLD_FILES+=usr/bin/macho-dump
OLD_FILES+=usr/bin/tblgen
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.1/include
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.1/lib
OLD_DIRS+=usr/lib/clang/3.7.1
OLD_FILES+=usr/share/man/man1/tblgen.1.gz
# 20160301: Remove taskqueue_enqueue_fast
OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz
# 20160225: Remove casperd and libcapsicum
OLD_FILES+=sbin/casperd
OLD_FILES+=etc/rc.d/casperd
OLD_FILES+=usr/share/man/man8/casperd.8.gz
OLD_FILES+=usr/include/libcapsicum.h
OLD_FILES+=usr/include/libcapsicum_service.h
OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz
OLD_FILES+=usr/include/libcapsicum_dns.h
OLD_FILES+=usr/include/libcapsicum_grp.h
OLD_FILES+=usr/include/libcapsicum_impl.h
OLD_FILES+=usr/include/libcapsicum_pwd.h
OLD_FILES+=usr/include/libcapsicum_random.h
OLD_FILES+=usr/include/libcapsicum_sysctl.h
OLD_FILES+=libexec/casper/dns
OLD_FILES+=libexec/casper/grp
OLD_FILES+=libexec/casper/pwd
OLD_FILES+=libexec/casper/random
OLD_FILES+=libexec/casper/sysctl
OLD_FILES+=libexec/casper/.debug/random.debug
OLD_FILES+=libexec/casper/.debug/dns.debug
OLD_FILES+=libexec/casper/.debug/sysctl.debug
OLD_FILES+=libexec/casper/.debug/pwd.debug
OLD_FILES+=libexec/casper/.debug/grp.debug
OLD_DIRS+=libexec/casper/.debug
OLD_DIRS+=libexec/casper
OLD_FILES+=usr/lib/libcapsicum.a
OLD_FILES+=usr/lib/libcapsicum.so
OLD_LIBS+=lib/libcapsicum.so.0
OLD_FILES+=usr/lib/libcapsicum_p.a
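# (The casperd/libcapsicum functionality was re-implemented as
# libcasper(3): Casper services are now started directly by the
# consuming process rather than brokered through a system daemon.)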
# 20160223: functionality from mkulzma(1) merged into mkuzip(1)
OLD_FILES+=usr/bin/mkulzma
OLD_FILES+=usr/share/man/man4/geom_uncompress.4.gz
OLD_FILES+=usr/share/man/man8/mkulzma.8.gz
# 20160211: Remove obsolete unbound-control-setup
OLD_FILES+=usr/sbin/unbound-control-setup
# 20160121: cc.h moved
OLD_FILES+=usr/include/netinet/cc.h
# 20160116: Update mandoc to cvs snapshot 20160116
OLD_FILES+=usr/share/mdocml/example.style.css
OLD_FILES+=usr/share/mdocml/style.css
OLD_DIRS+=usr/share/mdocml
# 20160114: SA-16:06.snmpd
OLD_FILES+=usr/share/examples/etc/snmpd.config
# 20160107: GNU ld installed as ld.bfd and linked as ld
OLD_FILES+=usr/lib/debug/usr/bin/ld.debug
# 20151225: new clang import which bumps version from 3.7.0 to 3.7.1
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.0/include
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.0/lib
OLD_DIRS+=usr/lib/clang/3.7.0
# 20151130: libelf moved from /usr/lib to /lib (libkvm dependency in r291406)
MOVED_LIBS+=usr/lib/libelf.so.2
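# (MOVED_LIBS marks a library that changed location rather than one that
# was retired; presumably only the stale copy at the old path is cleaned
# up, while the relocated /lib/libelf.so.2 is left in place.)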
# 20151115: Fix bad upgrade scheme
OLD_FILES+=usr/share/locale/zh_CN.GB18030/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/zh_CN.GB2312/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_CN.GBK/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_CN.eucCN/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_TW.Big5/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/zh_Hant_TW.UTF-8
# 20151107: String collation improvements
OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE
OLD_DIRS+=usr/share/locale/UTF-8
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.PT154
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-1
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-13
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-15
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-2
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-4
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.US-ASCII
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_TIME
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_NUMERIC
OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-4
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-1
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-15
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.UTF-8
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MONETARY
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-2
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MESSAGES
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-5
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MESSAGES
OLD_DIRS+=usr/share/locale/sr_YU.UTF-8
# 20151101: added missing _test suffix on multiple tests in lib/libc
OLD_FILES+=usr/tests/lib/libc/c063/faccessat
OLD_FILES+=usr/tests/lib/libc/c063/fchmodat
OLD_FILES+=usr/tests/lib/libc/c063/fchownat
OLD_FILES+=usr/tests/lib/libc/c063/fexecve
OLD_FILES+=usr/tests/lib/libc/c063/fstatat
OLD_FILES+=usr/tests/lib/libc/c063/linkat
OLD_FILES+=usr/tests/lib/libc/c063/mkdirat
OLD_FILES+=usr/tests/lib/libc/c063/mkfifoat
OLD_FILES+=usr/tests/lib/libc/c063/mknodat
OLD_FILES+=usr/tests/lib/libc/c063/openat
OLD_FILES+=usr/tests/lib/libc/c063/readlinkat
OLD_FILES+=usr/tests/lib/libc/c063/renameat
OLD_FILES+=usr/tests/lib/libc/c063/symlinkat
OLD_FILES+=usr/tests/lib/libc/c063/unlinkat
OLD_FILES+=usr/tests/lib/libc/c063/utimensat
OLD_FILES+=usr/tests/lib/libc/string/memchr
OLD_FILES+=usr/tests/lib/libc/string/memcpy
OLD_FILES+=usr/tests/lib/libc/string/memmem
OLD_FILES+=usr/tests/lib/libc/string/memset
OLD_FILES+=usr/tests/lib/libc/string/strcat
OLD_FILES+=usr/tests/lib/libc/string/strchr
OLD_FILES+=usr/tests/lib/libc/string/strcmp
OLD_FILES+=usr/tests/lib/libc/string/strcpy
OLD_FILES+=usr/tests/lib/libc/string/strcspn
OLD_FILES+=usr/tests/lib/libc/string/strerror
OLD_FILES+=usr/tests/lib/libc/string/strlen
OLD_FILES+=usr/tests/lib/libc/string/strpbrk
OLD_FILES+=usr/tests/lib/libc/string/strrchr
OLD_FILES+=usr/tests/lib/libc/string/strspn
OLD_FILES+=usr/tests/lib/libc/string/swab
# 20151101: 430.status-rwho was renamed to 430.status-uptime
OLD_FILES+=etc/periodic/daily/430.status-rwho
# 20151030: OpenSSL 1.0.2d import
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_certs.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl_str.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_509_CRL_fp.3.gz
OLD_LIBS+=lib/libcrypto.so.7
OLD_LIBS+=usr/lib/libssl.so.7
# 20151029: LinuxKPI moved to sys/compat/linuxkpi
OLD_FILES+=usr/include/dev/usb/usb_compat_linux.h
# 20151015: test symbols moved to /usr/lib/debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/atf_c++_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/tests_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/application_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/exceptions_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/atf_c_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/error_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tc_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tp_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/dynstr_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/list_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/map_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_helpers.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/user_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/test-programs/.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/c_helpers.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/cpp_helpers.debug
OLD_DIRS+=usr/tests/lib/libc/c063/.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/faccessat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchmodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchownat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fexecve.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fstatat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/linkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkdirat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkfifoat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mknodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/openat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/readlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/renameat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/symlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/unlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/utimensat.debug
OLD_DIRS+=usr/tests/lib/libc/db/.debug
OLD_FILES+=usr/tests/lib/libc/db/.debug/h_db.debug
OLD_DIRS+=usr/tests/lib/libc/gen/.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/alarm_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/arc4random_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/assert_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/basedirname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/dir_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/floatunditf_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fnmatch_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify2_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetmask_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetround_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ftok_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getcwd_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getgrent_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/glob_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/isnan_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/nice_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/pause_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/raise_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/realpath_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/setdomainname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sethostname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/syslog_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/time_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ttyname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/vis_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/execve/.debug
OLD_FILES+=usr/tests/lib/libc/gen/execve/.debug/execve_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/posix_spawn/.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/fileactions_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_fileactions.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawn.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawnattr.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawn_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawnattr_test.debug
OLD_DIRS+=usr/tests/lib/libc/hash/.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/h_hash.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/sha2_test.debug
OLD_DIRS+=usr/tests/lib/libc/inet/.debug
OLD_FILES+=usr/tests/lib/libc/inet/.debug/inet_network_test.debug
OLD_DIRS+=usr/tests/lib/libc/locale/.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/io_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbrtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbsnrtowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbstowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcscspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcspbrk_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcsspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcstod_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wctomb_test.debug
OLD_DIRS+=usr/tests/lib/libc/net/.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/ether_aton_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/getprotoent_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_dns_server.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_nsd_recurse.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_protoent.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_servent.debug
OLD_DIRS+=usr/tests/lib/libc/regex/.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/exhaust_test.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/h_regex.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/regex_att_test.debug
OLD_DIRS+=usr/tests/lib/libc/ssp/.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_fgets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_getcwd.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_gets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memmove.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memset.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_raw.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_read.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_readlink.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_snprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_sprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsnprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsprintf.debug
OLD_DIRS+=usr/tests/lib/libc/stdio/.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/clearerr_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fflush_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen2_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fputc_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/mktemp_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/popen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/printf_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/scanf_test.debug
OLD_DIRS+=usr/tests/lib/libc/stdlib/.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/abs_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/atoi_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/div_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/exit_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/getenv_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt_long.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/hsearch_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/posix_memalign_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/random_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtod_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtol_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/system_test.debug
OLD_DIRS+=usr/tests/lib/libc/string/.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memmem.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memset.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcat.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcmp.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strerror.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strlen.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strpbrk.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strrchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/swab.debug
OLD_DIRS+=usr/tests/lib/libc/sys/.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/access_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/chroot_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/clock_gettime_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/connect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/dup_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/fsync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getcontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getgroups_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getitimer_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getlogin_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getpid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getrusage_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getsid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/gettimeofday_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/issetugid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kevent_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/link_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/listen_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mincore_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkdir_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkfifo_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mknod_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mlock_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mmap_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mprotect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgctl_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgget_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgrcv_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgsnd_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/nanosleep_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe2_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/poll_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/revoke_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/select_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setrlimit_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setuid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigaction_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigqueue_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigtimedwait_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/socketpair_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/stat_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/timer_create_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/truncate_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/ucontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/umask_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/unlink_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/write_test.debug
OLD_DIRS+=usr/tests/lib/libc/termios/.debug
OLD_FILES+=usr/tests/lib/libc/termios/.debug/tcsetpgrp_test.debug
OLD_DIRS+=usr/tests/lib/libc/tls/.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/h_tls_dlopen.so.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/libh_tls_dynamic.so.1.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dlopen_test.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dynamic_test.debug
OLD_DIRS+=usr/tests/lib/libc/ttyio/.debug
OLD_FILES+=usr/tests/lib/libc/ttyio/.debug/ttyio_test.debug
OLD_DIRS+=usr/tests/lib/libcrypt/.debug
OLD_FILES+=usr/tests/lib/libcrypt/.debug/crypt_tests.debug
OLD_DIRS+=usr/tests/lib/libmp/.debug
OLD_FILES+=usr/tests/lib/libmp/.debug/legacy_test.debug
OLD_DIRS+=usr/tests/lib/libnv/.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/dnv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_array_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_add_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_exists_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_free_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_get_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_move_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_send_recv_test.debug
OLD_DIRS+=usr/tests/lib/libpam/.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_ctype.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readlinev.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readword.debug
OLD_DIRS+=usr/tests/lib/libproc/.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/proc_test.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/target_prog.debug
OLD_DIRS+=usr/tests/lib/librt/.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sched_test.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sem_test.debug
OLD_DIRS+=usr/tests/lib/libthr/.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/barrier_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/cond_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/condwait_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/detach_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/equal_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fork_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fpu_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_atexit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_cancel.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_exit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_resolv.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/join_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/mutex_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/once_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/preempt_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/rwlock_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sem_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/siglongjmp_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigmask_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigsuspend_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/swapcontext_test.debug
OLD_DIRS+=usr/tests/lib/libthr/dlopen/.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/dlopen_test.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/h_pthread_dlopen.so.1.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/main_pthread_create_test.debug
OLD_DIRS+=usr/tests/lib/libutil/.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/flopen_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/grp_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/pidfile_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain-nodomain_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain_test.debug
OLD_DIRS+=usr/tests/lib/libxo/.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/libenc_test.so.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_01.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_02.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_03.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_04.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_05.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_06.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_07.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_08.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_09.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_10.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_11.debug
OLD_DIRS+=usr/tests/lib/msun/.debug
OLD_FILES+=usr/tests/lib/msun/.debug/acos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/asin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/atan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cbrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ceil_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cosh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/erf_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/exp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/fmod_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/infinity_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ldexp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/log_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/pow_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/precision_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/round_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/scalbn_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sinh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sqrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tanh_test.debug
OLD_DIRS+=usr/tests/libexec/rtld-elf/.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/ld_library_pathfds.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/libpythagoras.so.0.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/target.debug
OLD_DIRS+=usr/tests/sbin/devd/.debug
OLD_FILES+=usr/tests/sbin/devd/.debug/client_test.debug
OLD_DIRS+=usr/tests/sbin/dhclient/.debug
OLD_FILES+=usr/tests/sbin/dhclient/.debug/option-domain-search_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/atf/.debug
OLD_FILES+=usr/tests/share/examples/tests/atf/.debug/printf_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/plain/.debug
OLD_FILES+=usr/tests/share/examples/tests/plain/.debug/printf_test.debug
OLD_DIRS+=usr/tests/sys/aio/.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_kqueue_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/lio_kqueue_test.debug
OLD_DIRS+=usr/tests/sys/fifo/.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_create.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_io.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_misc.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_open.debug
OLD_DIRS+=usr/tests/sys/file/.debug
OLD_FILES+=usr/tests/sys/file/.debug/closefrom_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/dup_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/fcntlflags_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/flock_helper.debug
OLD_FILES+=usr/tests/sys/file/.debug/ftruncate_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/newfileops_on_fork_test.debug
OLD_DIRS+=usr/tests/sys/kern/.debug
OLD_FILES+=usr/tests/sys/kern/.debug/kern_descrip_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/ptrace_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/unix_seqpacket_test.debug
OLD_DIRS+=usr/tests/sys/kern/execve/.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/execve_helper.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/good_aout.debug
OLD_DIRS+=usr/tests/sys/kqueue/.debug
OLD_FILES+=usr/tests/sys/kqueue/.debug/kqtest.debug
OLD_DIRS+=usr/tests/sys/mqueue/.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest1.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest2.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest3.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest4.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest5.debug
OLD_DIRS+=usr/tests/sys/netinet/.debug
OLD_FILES+=usr/tests/sys/netinet/.debug/udp_dontroute.debug
OLD_DIRS+=usr/tests/sys/pjdfstest/.debug
OLD_FILES+=usr/tests/sys/pjdfstest/.debug/pjdfstest.debug
OLD_DIRS+=usr/tests/sys/vm/.debug
OLD_FILES+=usr/tests/sys/vm/.debug/mmap_test.debug
# 20151015: Rename files due to file-installed-as-dir bug
OLD_FILES+=usr/share/doc/legal/realtek
OLD_FILES+=usr/share/doc/legal/realtek/LICENSE
OLD_DIRS+=usr/share/doc/legal/realtek
OLD_DIRS+=usr/share/doc/legal/intel_ipw
OLD_FILES+=usr/share/doc/legal/intel_ipw/LICENSE
OLD_FILES+=usr/share/doc/legal/intel_iwn
OLD_FILES+=usr/share/doc/legal/intel_iwn/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_iwn
OLD_DIRS+=usr/share/doc/legal/intel_iwi
OLD_FILES+=usr/share/doc/legal/intel_iwi/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_wpi
OLD_FILES+=usr/share/doc/legal/intel_wpi/LICENSE
# 20151006: new libc++ import
OLD_FILES+=usr/include/c++/__tuple_03
OLD_FILES+=usr/include/c++/v1/__tuple_03
OLD_FILES+=usr/include/c++/v1/tr1/__tuple_03
# 20151006: new clang import which bumps version from 3.6.1 to 3.7.0
OLD_FILES+=usr/lib/clang/3.6.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.1/include
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.1/lib
OLD_DIRS+=usr/lib/clang/3.6.1
# 20150928: unused sgsmsg utility is removed
OLD_FILES+=usr/bin/sgsmsg
# 20150926: remove links to removed/unimplemented mbuf(9) macros
OLD_FILES+=usr/share/man/man9/MEXT_ADD_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXTFREE.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_IS_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_REM_REF.9.gz
OLD_FILES+=usr/share/man/man9/MFREE.9.gz
# 20150818: *allocm() are gone in jemalloc 4.0.0
OLD_FILES+=usr/share/man/man3/allocm.3.gz
OLD_FILES+=usr/share/man/man3/dallocm.3.gz
OLD_FILES+=usr/share/man/man3/nallocm.3.gz
OLD_FILES+=usr/share/man/man3/rallocm.3.gz
OLD_FILES+=usr/share/man/man3/sallocm.3.gz
# 20150802: Remove NetBSD's test for pw(8)
OLD_FILES+=usr/tests/usr.sbin/pw/pw_test
# 20150719: Remove libarchive.pc
OLD_FILES+=usr/libdata/pkgconfig/libarchive.pc
# 20150705: Rename DTrace provider man pages
OLD_FILES+=usr/share/man/man4/dtrace-io.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-ip.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-proc.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-sched.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-tcp.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-udp.4.gz
# 20150704: nvlist private headers no longer installed
OLD_FILES+=usr/include/sys/nv_impl.h
OLD_FILES+=usr/include/sys/nvlist_impl.h
OLD_FILES+=usr/include/sys/nvpair_impl.h
# 20150624: libugidfw shared library version bump
OLD_LIBS+=usr/lib/libugidfw.so.4
# 20150604: Move nvlist man pages to section 9
OLD_FILES+=usr/share/man/man3/libnv.3.gz
OLD_FILES+=usr/share/man/man3/nv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_clone.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_create.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_destroy.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_dump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_empty.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_fdump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_flags.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_parent.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_next.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_pack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_recv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_send.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_set_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_size.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_unpack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_xfer.3.gz
# 20150702: Remove duplicated nvlist includes
OLD_FILES+=usr/include/dnv.h
OLD_FILES+=usr/include/nv.h
# 20150528: PCI IOV device driver methods moved to a separate kobj interface
OLD_FILES+=usr/share/man/man9/PCI_ADD_VF.9.gz
OLD_FILES+=usr/share/man/man9/PCI_INIT_IOV.9.gz
OLD_FILES+=usr/share/man/man9/PCI_UNINIT_IOV.9.gz
# 20150525: new clang import which bumps version from 3.6.0 to 3.6.1
OLD_FILES+=usr/lib/clang/3.6.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.0/include
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.0/lib
OLD_DIRS+=usr/lib/clang/3.6.0
# 20150521: demandoc(1), mandoc.3 and mandoc_headers.3 removed
OLD_FILES+=usr/bin/demandoc
OLD_FILES+=usr/share/man/man1/demandoc.1.gz
OLD_FILES+=usr/share/man/man3/mandoc.3.gz
OLD_FILES+=usr/share/man/man3/mandoc_headers.3.gz
# 20150520: libheimsqlite removed
OLD_FILES+=usr/lib/libheimsqlite.a
OLD_FILES+=usr/lib/libheimsqlite.so
OLD_LIBS+=usr/lib/libheimsqlite.so.11
OLD_FILES+=usr/lib/libheimsqlite_p.a
# 20150506: NDHASGIANT.9 removed
OLD_FILES+=usr/share/man/man9/NDHASGIANT.9.gz
# 20150504
OLD_FILES+=usr/share/examples/etc/libmap32.conf
OLD_FILES+=usr/include/bsdstat.h
OLD_DIRS+=usr/lib32/private
OLD_LIBS+=usr/lib/private/libatf-c++.so.2
OLD_LIBS+=usr/lib/private/libbsdstat.so.1
OLD_LIBS+=usr/lib/private/libheimipcs.so.11
OLD_LIBS+=usr/lib/private/libsqlite3.so.0
OLD_LIBS+=usr/lib/private/libunbound.so.5
OLD_LIBS+=usr/lib/private/libatf-c.so.1
OLD_LIBS+=usr/lib/private/libheimipcc.so.11
OLD_LIBS+=usr/lib/private/libldns.so.5
OLD_LIBS+=usr/lib/private/libssh.so.5
OLD_LIBS+=usr/lib/private/libucl.so.1
OLD_DIRS+=usr/lib/private
# 20150501: soeliminate removed
OLD_FILES+=usr/bin/soeliminate
OLD_FILES+=usr/share/man/man1/soeliminate.1.gz
# 20150501: Remove the nvlist_.*[vf] function manpages
OLD_FILES+=usr/share/man/man3/nvlist_addf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_string.3.gz
# 20150429: remove never-written documentation
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
# 20150427: test/sys/kern/mmap_test moved to test/sys/vm/mmap_test
OLD_FILES+=usr/tests/sys/kern/mmap_test
# 20150422: zlib.c moved from net to libkern
OLD_FILES+=usr/include/net/zlib.h
OLD_FILES+=usr/include/net/zutil.h
# 20150418: mount_oldnfs removed
OLD_FILES+=sbin/mount_oldnfs
OLD_FILES+=usr/share/man/man8/mount_oldnfs.8.gz
# 20150416: ALTQ moved to net/altq
OLD_FILES+=usr/include/altq/altq_rmclass_debug.h
OLD_FILES+=usr/include/altq/altq.h
OLD_FILES+=usr/include/altq/altq_cdnr.h
OLD_FILES+=usr/include/altq/altq_hfsc.h
OLD_FILES+=usr/include/altq/altq_priq.h
OLD_FILES+=usr/include/altq/altqconf.h
OLD_FILES+=usr/include/altq/altq_classq.h
OLD_FILES+=usr/include/altq/altq_red.h
OLD_FILES+=usr/include/altq/if_altq.h
OLD_FILES+=usr/include/altq/altq_var.h
OLD_FILES+=usr/include/altq/altq_rmclass.h
OLD_FILES+=usr/include/altq/altq_cbq.h
OLD_FILES+=usr/include/altq/altq_rio.h
OLD_DIRS+=usr/include/altq
# 20150330: ntp 4.2.8p1
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/man/man1/sntp.1.gz
# 20150329: bootconfig.h removed (arm only)
.if ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/bootconfig.h
.endif
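# (Entries wrapped in .if ${TARGET_ARCH} blocks, like the one above, are
# appended only when building for the matching architecture.)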
# 20150326: pmcstudy.1 removed
OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz
# 20150315: new clang import which bumps version from 3.5.1 to 3.6.0
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.1/altivec.h
OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.1/cpuid.h
OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.1/immintrin.h
OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/module.modulemap
OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.1
OLD_DIRS+=usr/include/clang
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.1/lib
OLD_DIRS+=usr/lib/clang/3.5.1
# 20150302: binutils documentation distributed as a manpage
OLD_FILES+=usr/share/doc/binutils/as.txt
OLD_FILES+=usr/share/doc/binutils/ld.txt
OLD_DIRS+=usr/share/doc/binutils
# 20150222: Removed bcd(6) and ppt(6)
OLD_FILES+=usr/bin/bcd
OLD_FILES+=usr/bin/ppt
OLD_FILES+=usr/share/man/man6/bcd.6.gz
OLD_FILES+=usr/share/man/man6/ppt.6.gz
# 20150217: Removed remnants of ar(4) driver
OLD_FILES+=usr/include/dev/ic/hd64570.h
# 20150212: /usr/games moved into /usr/bin
OLD_FILES+=usr/games/bcd
OLD_FILES+=usr/games/caesar
OLD_FILES+=usr/games/factor
OLD_FILES+=usr/games/fortune
OLD_FILES+=usr/games/grdc
OLD_FILES+=usr/games/morse
OLD_FILES+=usr/games/number
OLD_FILES+=usr/games/pom
OLD_FILES+=usr/games/ppt
OLD_FILES+=usr/games/primes
OLD_FILES+=usr/games/random
OLD_FILES+=usr/games/rot13
OLD_FILES+=usr/games/strfile
OLD_FILES+=usr/games/unstr
OLD_DIRS+=usr/games
# 20150209: liblzma header
OLD_FILES+=usr/include/lzma/lzma.h
# 20150124: spl.9 and friends removed
OLD_FILES+=usr/share/man/man9/spl.9.gz
OLD_FILES+=usr/share/man/man9/spl0.9.gz
OLD_FILES+=usr/share/man/man9/splbio.9.gz
OLD_FILES+=usr/share/man/man9/splclock.9.gz
OLD_FILES+=usr/share/man/man9/splhigh.9.gz
OLD_FILES+=usr/share/man/man9/splimp.9.gz
OLD_FILES+=usr/share/man/man9/splnet.9.gz
OLD_FILES+=usr/share/man/man9/splsoftclock.9.gz
OLD_FILES+=usr/share/man/man9/splsofttty.9.gz
OLD_FILES+=usr/share/man/man9/splstatclock.9.gz
OLD_FILES+=usr/share/man/man9/spltty.9.gz
OLD_FILES+=usr/share/man/man9/splvm.9.gz
OLD_FILES+=usr/share/man/man9/splx.9.gz
# 20150118: toeplitz.c moved from netinet to net
OLD_FILES+=usr/include/netinet/toeplitz.h
# 20150118: new clang import which bumps version from 3.5.0 to 3.5.1
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.0/altivec.h
OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.0/cpuid.h
OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.0/immintrin.h
OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/module.modulemap
OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.0
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.0/lib
OLD_DIRS+=usr/lib/clang/3.5.0
# 20150102: removal of asr(4)
OLD_FILES+=usr/share/man/man4/asr.4.gz
# 20150102: removal of texinfo
OLD_FILES+=usr/bin/info
OLD_FILES+=usr/bin/infokey
OLD_FILES+=usr/bin/install-info
OLD_FILES+=usr/bin/makeinfo
OLD_FILES+=usr/bin/texindex
OLD_FILES+=usr/share/info/am-utils.info.gz
OLD_FILES+=usr/share/info/as.info.gz
OLD_FILES+=usr/share/info/binutils.info.gz
OLD_FILES+=usr/share/info/com_err.info.gz
OLD_FILES+=usr/share/info/cpp.info.gz
OLD_FILES+=usr/share/info/cppinternals.info.gz
OLD_FILES+=usr/share/info/diff.info.gz
OLD_FILES+=usr/share/info/dir
OLD_FILES+=usr/share/info/gcc.info.gz
OLD_FILES+=usr/share/info/gccint.info.gz
OLD_FILES+=usr/share/info/gdb.info.gz
OLD_FILES+=usr/share/info/gdbint.info.gz
OLD_FILES+=usr/share/info/gperf.info.gz
OLD_FILES+=usr/share/info/grep.info.gz
OLD_FILES+=usr/share/info/groff.info.gz
OLD_FILES+=usr/share/info/heimdal.info.gz
OLD_FILES+=usr/share/info/history.info.gz
OLD_FILES+=usr/share/info/info-stnd.info.gz
OLD_FILES+=usr/share/info/info.info.gz
OLD_FILES+=usr/share/info/ld.info.gz
OLD_FILES+=usr/share/info/regex.info.gz
OLD_FILES+=usr/share/info/rluserman.info.gz
OLD_FILES+=usr/share/info/stabs.info.gz
OLD_FILES+=usr/share/info/texinfo.info.gz
OLD_FILES+=usr/share/man/man1/info.1.gz
OLD_FILES+=usr/share/man/man1/infokey.1.gz
OLD_FILES+=usr/share/man/man1/install-info.1.gz
OLD_FILES+=usr/share/man/man1/makeinfo.1.gz
OLD_FILES+=usr/share/man/man1/texindex.1.gz
OLD_FILES+=usr/share/man/man5/info.5.gz
OLD_FILES+=usr/share/man/man5/texinfo.5.gz
OLD_DIRS+=usr/share/info
# 20141231: new clang import which bumps version from 3.4.1 to 3.5.0
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4.1/altivec.h
OLD_FILES+=usr/include/clang/3.4.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.4.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.4.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4.1/cpuid.h
OLD_FILES+=usr/include/clang/3.4.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/immintrin.h
OLD_FILES+=usr/include/clang/3.4.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/module.map
OLD_FILES+=usr/include/clang/3.4.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.4.1
# 20141225: Remove gpib/ieee488
OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h
OLD_FILES+=usr/include/dev/ieee488/tnt4882.h
OLD_FILES+=usr/include/dev/ieee488/ugpib.h
OLD_FILES+=usr/include/dev/ieee488/upd7210.h
OLD_DIRS+=usr/include/dev/ieee488
OLD_FILES+=usr/include/gpib/gpib.h
OLD_DIRS+=usr/include/gpib
OLD_FILES+=usr/lib/libgpib.a
OLD_FILES+=usr/lib/libgpib_p.a
OLD_FILES+=usr/lib/libgpib.so
OLD_LIBS+=usr/lib/libgpib.so.3
OLD_FILES+=usr/share/man/man3/gpib.3.gz
OLD_FILES+=usr/share/man/man3/ibclr.3.gz
OLD_FILES+=usr/share/man/man3/ibdev.3.gz
OLD_FILES+=usr/share/man/man3/ibdma.3.gz
OLD_FILES+=usr/share/man/man3/ibeos.3.gz
OLD_FILES+=usr/share/man/man3/ibeot.3.gz
OLD_FILES+=usr/share/man/man3/ibloc.3.gz
OLD_FILES+=usr/share/man/man3/ibonl.3.gz
OLD_FILES+=usr/share/man/man3/ibpad.3.gz
OLD_FILES+=usr/share/man/man3/ibrd.3.gz
OLD_FILES+=usr/share/man/man3/ibsad.3.gz
OLD_FILES+=usr/share/man/man3/ibsic.3.gz
OLD_FILES+=usr/share/man/man3/ibtmo.3.gz
OLD_FILES+=usr/share/man/man3/ibtrg.3.gz
OLD_FILES+=usr/share/man/man3/ibwrt.3.gz
OLD_FILES+=usr/share/man/man4/gpib.4.gz
OLD_FILES+=usr/share/man/man4/pcii.4.gz
OLD_FILES+=usr/share/man/man4/tnt4882.4.gz
# 20141224: libxo moved to /lib
MOVED_LIBS+=usr/lib/libxo.so.0
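# (MOVED_LIBS, as used here and below, is assumed to mark libraries whose
# install location changed, e.g. from /usr/lib to /lib, so that only the
# stale copy at the old path is cleaned up; OLD_LIBS, by contrast, lists
# library versions that are gone entirely.)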
# 20141223: remove in6_gif.h and in_gif.h
OLD_FILES+=usr/include/netinet/in_gif.h
OLD_FILES+=usr/include/netinet6/in6_gif.h
# 20141209: pw tests broken into a file per command
OLD_FILES+=usr/tests/usr.sbin/pw/pw_delete
OLD_FILES+=usr/tests/usr.sbin/pw/pw_modify
# 20141202: update to mandoc CVS 20141201
OLD_FILES+=usr/bin/preconv
OLD_FILES+=usr/share/man/man1/preconv.1.gz
# 20141129: mrouted rc.d script removed from base
OLD_FILES+=etc/rc.d/mrouted
# 20141126: convert sbin/mdconfig/tests to ATF format tests
OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test
OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test
OLD_FILES+=usr/tests/sbin/mdconfig/run.pl
# 20141126: remove xform_ipip decapsulation fallback
OLD_FILES+=usr/include/netipsec/ipip_var.h
# 20141122: mandoc updated to 1.13.1
OLD_FILES+=usr/share/mdocml/external.png
# 20141111: SF_KQUEUE code removed
OLD_FILES+=usr/include/sys/sf_base.h
OLD_FILES+=usr/include/sys/sf_sync.h
# 20141109: faith/faithd removal
OLD_FILES+=etc/rc.d/faith
OLD_FILES+=usr/share/man/man4/faith.4.gz
OLD_FILES+=usr/share/man/man4/if_faith.4.gz
OLD_FILES+=usr/sbin/faithd
OLD_FILES+=usr/share/man/man8/faithd.8.gz
# 20141107: overhaul if_gre(4)
OLD_FILES+=usr/include/netinet/ip_gre.h
# 20141102: postrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/postrandom
# 20141031: initrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/initrandom
# 20141030: atf 0.21 import
OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz
# 20141028: debug files accidentally installed as directory names
OLD_FILES+=usr/lib/debug/usr/lib/i18n
OLD_FILES+=usr/lib/debug/usr/lib/private
OLD_FILES+=usr/lib/debug/usr/lib32/i18n
OLD_FILES+=usr/lib/debug/usr/lib32/private
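# (These four entries are OLD_FILES rather than OLD_DIRS because, per the
# note above, plain files were created under the directory names.)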
# 20141015: OpenSSL 1.0.1j import
OLD_FILES+=usr/share/openssl/man/man3/CMS_sign_add1_signer.3.gz
# 20141003: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.2
# 20140922: sleepq_calc_signal_retval.9 and sleepq_catch_signals.9 removed
OLD_FILES+=usr/share/man/man9/sleepq_calc_signal_retval.9.gz
OLD_FILES+=usr/share/man/man9/sleepq_catch_signals.9.gz
# 20140917: hv_kvpd rc.d script removed in favor of devd configuration
OLD_FILES+=etc/rc.d/hv_kvpd
# 20140917: libnv was accidentally being installed to /usr/lib instead of /lib
MOVED_LIBS+=usr/lib/libnv.so.0
# 20140829: rc.d/kerberos removed
OLD_FILES+=etc/rc.d/kerberos
# 20140814: libopie version bump
OLD_LIBS+=usr/lib/libopie.so.7
# 20140811: otp-sha renamed to otp-sha1
OLD_FILES+=usr/bin/otp-sha
OLD_FILES+=usr/share/man/man1/otp-sha.1.gz
# 20140807: Remove private lib files that should not be installed
OLD_FILES+=usr/lib/private/libatf-c.a
OLD_FILES+=usr/lib/private/libatf-c.so
OLD_FILES+=usr/lib/private/libatf-c_p.a
OLD_FILES+=usr/lib/private/libatf-c++.a
OLD_FILES+=usr/lib/private/libatf-c++.so
OLD_FILES+=usr/lib/private/libatf-c++_p.a
OLD_FILES+=usr/lib/private/libbsdstat.a
OLD_FILES+=usr/lib/private/libbsdstat.so
OLD_FILES+=usr/lib/private/libbsdstat_p.a
OLD_FILES+=usr/lib/private/libheimipcc.a
OLD_FILES+=usr/lib/private/libheimipcc.so
OLD_FILES+=usr/lib/private/libheimipcc_p.a
OLD_FILES+=usr/lib/private/libheimipcs.a
OLD_FILES+=usr/lib/private/libheimipcs.so
OLD_FILES+=usr/lib/private/libheimipcs_p.a
OLD_FILES+=usr/lib/private/libldns.a
OLD_FILES+=usr/lib/private/libldns.so
OLD_FILES+=usr/lib/private/libldns_p.a
OLD_FILES+=usr/lib/private/libssh.a
OLD_FILES+=usr/lib/private/libssh.so
OLD_FILES+=usr/lib/private/libssh_p.a
OLD_FILES+=usr/lib/private/libunbound.a
OLD_FILES+=usr/lib/private/libunbound.so
OLD_FILES+=usr/lib/private/libunbound_p.a
OLD_FILES+=usr/lib/private/libucl.a
OLD_FILES+=usr/lib/private/libucl.so
OLD_FILES+=usr/lib/private/libucl_p.a
# 20140803: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz
# 20140731: SYSCTL_ADD_OID.9 removed
OLD_FILES+=usr/share/man/man9/SYSCTL_ADD_OID.9.gz
# 20140728: libsbuf restored to old version
OLD_LIBS+=lib/libsbuf.so.7
# 20140728: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_DESTROYVOBJECT.9.gz
# 20140723: renamed to PCBGROUP.9
OLD_FILES+=usr/share/man/man9/PCBGROUPS.9.gz
# 20140722: browse_packages_ftp.sh removed
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_ftp.sh
# 20140718: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/zero_copy.9.gz
OLD_FILES+=usr/share/man/man9/zero_copy_sockets.9.gz
# 20140718: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_page_protect.9.gz
# 20140717: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_clear_reference.9.gz
# 20140716: Remove an incorrectly named man page
OLD_FILES+=usr/share/man/man9/pmap_ts_modified.9.gz
# 20140712: Removal of bsd.dtrace.mk
OLD_FILES+=usr/share/mk/bsd.dtrace.mk
# 20140705: turn libreadline into an internal lib
OLD_LIBS+=lib/libreadline.so.8
OLD_FILES+=usr/lib/libreadline.a
OLD_FILES+=usr/lib/libreadline_p.a
OLD_FILES+=usr/lib/libreadline.so
OLD_FILES+=usr/lib/libhistory.a
OLD_FILES+=usr/lib/libhistory_p.a
OLD_FILES+=usr/lib/libhistory.so
OLD_LIBS+=usr/lib/libhistory.so.8
OLD_FILES+=usr/include/readline/chardefs.h
OLD_FILES+=usr/include/readline/history.h
OLD_FILES+=usr/include/readline/keymaps.h
OLD_FILES+=usr/include/readline/readline.h
OLD_FILES+=usr/include/readline/tilde.h
OLD_FILES+=usr/include/readline/rlconf.h
OLD_FILES+=usr/include/readline/rlstdc.h
OLD_FILES+=usr/include/readline/rltypedefs.h
OLD_DIRS+=usr/include/readline
OLD_FILES+=usr/share/info/readline.info.gz
OLD_FILES+=usr/share/man/man3/readline.3.gz
OLD_FILES+=usr/share/man/man3/rlhistory.3.gz
# 20140625: csup removal
OLD_FILES+=usr/bin/csup
OLD_FILES+=usr/bin/cpasswd
OLD_FILES+=usr/share/man/man1/csup.1.gz
OLD_FILES+=usr/share/man/man1/cpasswd.1.gz
OLD_FILES+=usr/share/examples/cvsup/README
OLD_FILES+=usr/share/examples/cvsup/cvs-supfile
OLD_FILES+=usr/share/examples/cvsup/stable-supfile
OLD_FILES+=usr/share/examples/cvsup/standard-supfile
OLD_DIRS+=usr/share/examples/cvsup
# 20140614: send-pr removal
OLD_FILES+=usr/bin/sendbug
OLD_FILES+=usr/share/info/send-pr.info.gz
OLD_FILES+=usr/share/man/man1/send-pr.1.gz
OLD_FILES+=usr/share/man/man1/sendbug.1.gz
OLD_FILES+=etc/gnats/freefall
OLD_DIRS+=etc/gnats
# 20140512: new clang import which bumps version from 3.4 to 3.4.1
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4/altivec.h
OLD_FILES+=usr/include/clang/3.4/ammintrin.h
OLD_FILES+=usr/include/clang/3.4/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4/avxintrin.h
OLD_FILES+=usr/include/clang/3.4/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4/cpuid.h
OLD_FILES+=usr/include/clang/3.4/emmintrin.h
OLD_FILES+=usr/include/clang/3.4/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4/immintrin.h
OLD_FILES+=usr/include/clang/3.4/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4/mmintrin.h
OLD_FILES+=usr/include/clang/3.4/module.map
OLD_FILES+=usr/include/clang/3.4/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4/shaintrin.h
OLD_FILES+=usr/include/clang/3.4/smmintrin.h
OLD_FILES+=usr/include/clang/3.4/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4/x86intrin.h
OLD_FILES+=usr/include/clang/3.4/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4/xopintrin.h
OLD_FILES+=usr/include/clang/3.4/arm_neon.h
OLD_DIRS+=usr/include/clang/3.4
# 20140505: src.opts.mk was bogusly installed
OLD_FILES+=usr/share/mk/src.opts.mk
# 20140505: Reject PR kern/187551
OLD_FILES+=usr/tests/sbin/ifconfig/fibs_test
# 20140502: Removal of lindev(4)
OLD_FILES+=usr/share/man/man4/lindev.4.gz
# 20140425: profiled libssp and libstand removed
OLD_FILES+=usr/lib/libssp_p.a
OLD_FILES+=usr/lib/libstand_p.a
# 20140413: Removed NO_MANCOMPRESS from mount_fusefs
OLD_FILES+=usr/share/man/man8/mount_fusefs.8
# 20140314: AppleTalk support removed
OLD_DIRS+=usr/include/netatalk
OLD_FILES+=usr/include/netatalk/aarp.h
OLD_FILES+=usr/include/netatalk/at.h
OLD_FILES+=usr/include/netatalk/at_extern.h
OLD_FILES+=usr/include/netatalk/at_var.h
OLD_FILES+=usr/include/netatalk/ddp.h
OLD_FILES+=usr/include/netatalk/ddp_pcb.h
OLD_FILES+=usr/include/netatalk/ddp_var.h
OLD_FILES+=usr/include/netatalk/endian.h
OLD_FILES+=usr/include/netatalk/phase2.h
# 20140314: Remove IPX/SPX
OLD_LIBS+=lib/libipx.so.5
OLD_FILES+=usr/include/netipx/ipx.h
OLD_FILES+=usr/include/netipx/ipx_if.h
OLD_FILES+=usr/include/netipx/ipx_pcb.h
OLD_FILES+=usr/include/netipx/ipx_var.h
OLD_FILES+=usr/include/netipx/spx.h
OLD_FILES+=usr/include/netipx/spx_debug.h
OLD_FILES+=usr/include/netipx/spx_timer.h
OLD_FILES+=usr/include/netipx/spx_var.h
OLD_DIRS+=usr/include/netipx
OLD_FILES+=usr/lib/libipx.a
OLD_FILES+=usr/lib/libipx.so
OLD_FILES+=usr/lib/libipx_p.a
OLD_FILES+=usr/sbin/IPXrouted
OLD_FILES+=usr/share/man/man3/ipx.3.gz
OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz
OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz
OLD_FILES+=usr/share/man/man4/ef.4.gz
OLD_FILES+=usr/share/man/man4/if_ef.4.gz
OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz
# 20140314: bsdconfig usermgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/userinput
# 20140307: bsdconfig groupmgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/groupinput
# 20140223: Remove libyaml
OLD_FILES+=usr/lib/private/libyaml.a
OLD_FILES+=usr/lib/private/libyaml.so
OLD_LIBS+=usr/lib/private/libyaml.so.1
OLD_FILES+=usr/lib/private/libyaml_p.a
# 20140216: new clang import which bumps version from 3.3 to 3.4
OLD_FILES+=usr/bin/llvm-prof
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.3/altivec.h
OLD_FILES+=usr/include/clang/3.3/ammintrin.h
OLD_FILES+=usr/include/clang/3.3/avx2intrin.h
OLD_FILES+=usr/include/clang/3.3/avxintrin.h
OLD_FILES+=usr/include/clang/3.3/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.3/bmiintrin.h
OLD_FILES+=usr/include/clang/3.3/cpuid.h
OLD_FILES+=usr/include/clang/3.3/emmintrin.h
OLD_FILES+=usr/include/clang/3.3/f16cintrin.h
OLD_FILES+=usr/include/clang/3.3/fma4intrin.h
OLD_FILES+=usr/include/clang/3.3/fmaintrin.h
OLD_FILES+=usr/include/clang/3.3/immintrin.h
OLD_FILES+=usr/include/clang/3.3/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.3/mm3dnow.h
OLD_FILES+=usr/include/clang/3.3/mm_malloc.h
OLD_FILES+=usr/include/clang/3.3/mmintrin.h
OLD_FILES+=usr/include/clang/3.3/module.map
OLD_FILES+=usr/include/clang/3.3/nmmintrin.h
OLD_FILES+=usr/include/clang/3.3/pmmintrin.h
OLD_FILES+=usr/include/clang/3.3/popcntintrin.h
OLD_FILES+=usr/include/clang/3.3/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.3/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.3/rtmintrin.h
OLD_FILES+=usr/include/clang/3.3/smmintrin.h
OLD_FILES+=usr/include/clang/3.3/tmmintrin.h
OLD_FILES+=usr/include/clang/3.3/wmmintrin.h
OLD_FILES+=usr/include/clang/3.3/x86intrin.h
OLD_FILES+=usr/include/clang/3.3/xmmintrin.h
OLD_FILES+=usr/include/clang/3.3/xopintrin.h
OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz
OLD_DIRS+=usr/include/clang/3.3
# 20140216: nve(4) removed
OLD_FILES+=usr/share/man/man4/if_nve.4.gz
OLD_FILES+=usr/share/man/man4/nve.4.gz
# 20140205: Open Firmware device moved
OLD_FILES+=usr/include/dev/ofw/ofw_nexus.h
# 20140128: libelf and libdwarf import
OLD_LIBS+=usr/lib/libelf.so.1
OLD_LIBS+=usr/lib/libdwarf.so.3
# 20140123: apicvar header moved to x86
OLD_FILES+=usr/include/machine/apicvar.h
# 20131215: libcam version bumped
OLD_LIBS+=lib/libcam.so.6
# 20131202: libcapsicum and libcasper moved to /lib/
MOVED_LIBS+=usr/lib/libcapsicum.so.0
MOVED_LIBS+=usr/lib/libcasper.so.0
# 20131109: extattr(2) mlinks fixed
OLD_FILES+=usr/share/man/man2/extattr_delete_list.2.gz
OLD_FILES+=usr/share/man/man2/extattr_get_list.2.gz
# 20131107: example files removed
OLD_FILES+=usr/share/examples/libusb20/aux.c
OLD_FILES+=usr/share/examples/libusb20/aux.h
# 20131103: WITH_LIBICONV_COMPAT removal
OLD_FILES+=usr/include/_libiconv_compat.h
OLD_FILES+=usr/lib/libiconv.a
OLD_FILES+=usr/lib/libiconv.so
OLD_LIBS+=usr/lib/libiconv.so.3
OLD_FILES+=usr/lib/libiconv_p.a
# 20131103: removal of utxrm(8), use 'utx rm' instead
OLD_FILES+=usr/sbin/utxrm
OLD_FILES+=usr/share/man/man8/utxrm.8.gz
# 20131031: pkg_install has been removed
OLD_FILES+=etc/periodic/daily/220.backup-pkgdb
OLD_FILES+=etc/periodic/daily/490.status-pkg-changes
OLD_FILES+=etc/periodic/security/460.chkportsum
OLD_FILES+=etc/periodic/weekly/400.status-pkg
OLD_FILES+=usr/sbin/pkg_add
OLD_FILES+=usr/sbin/pkg_create
OLD_FILES+=usr/sbin/pkg_delete
OLD_FILES+=usr/sbin/pkg_info
OLD_FILES+=usr/sbin/pkg_updating
OLD_FILES+=usr/sbin/pkg_version
OLD_FILES+=usr/share/man/man1/pkg_add.1.gz
OLD_FILES+=usr/share/man/man1/pkg_create.1.gz
OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz
OLD_FILES+=usr/share/man/man1/pkg_info.1.gz
OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz
OLD_FILES+=usr/share/man/man1/pkg_version.1.gz
# 20131030: /etc/keys moved to /usr/share/keys
OLD_DIRS+=etc/keys
OLD_DIRS+=etc/keys/pkg
OLD_DIRS+=etc/keys/pkg/revoked
OLD_DIRS+=etc/keys/pkg/trusted
OLD_FILES+=etc/keys/pkg/trusted/pkg.freebsd.org.2013102301
# 20131028: ng_fec(4) removed
OLD_FILES+=usr/include/netgraph/ng_fec.h
OLD_FILES+=usr/share/man/man4/ng_fec.4.gz
# 20131027: header moved
OLD_FILES+=usr/include/net/pf_mtag.h
# 20131023: remove never-used iscsi directory
OLD_DIRS+=usr/share/examples/iscsi
# 20131021: isf(4) removed
OLD_FILES+=usr/sbin/isfctl
OLD_FILES+=usr/share/man/man4/isf.4.gz
OLD_FILES+=usr/share/man/man8/isfctl.8.gz
# 20131014: libbsdyml becomes private
OLD_FILES+=usr/lib/libbsdyml.a
OLD_FILES+=usr/lib/libbsdyml.so
OLD_LIBS+=usr/lib/libbsdyml.so.0
OLD_FILES+=usr/lib/libbsdyml_p.a
OLD_FILES+=usr/share/man/man3/libbsdyml.3.gz
OLD_FILES+=usr/include/bsdyml.h
# 20131013: Removal of the ATF tools
OLD_FILES+=etc/atf/FreeBSD.conf
OLD_FILES+=etc/atf/atf-run.hooks
OLD_FILES+=etc/atf/common.conf
OLD_FILES+=usr/bin/atf-config
OLD_FILES+=usr/bin/atf-report
OLD_FILES+=usr/bin/atf-run
OLD_FILES+=usr/bin/atf-version
OLD_FILES+=usr/share/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/tests-results.css
OLD_FILES+=usr/share/man/man1/atf-config.1.gz
OLD_FILES+=usr/share/man/man1/atf-report.1.gz
OLD_FILES+=usr/share/man/man1/atf-run.1.gz
OLD_FILES+=usr/share/man/man1/atf-version.1.gz
OLD_FILES+=usr/share/man/man5/atf-formats.5.gz
OLD_FILES+=usr/share/xml/atf/tests-results.dtd
OLD_FILES+=usr/share/xsl/atf/tests-results.xsl
OLD_DIRS+=etc/atf
OLD_DIRS+=usr/share/examples/atf
OLD_DIRS+=usr/share/xml/atf
OLD_DIRS+=usr/share/xml
OLD_DIRS+=usr/share/xsl/atf
OLD_DIRS+=usr/share/xsl
# 20131009: freebsd-version moved from /libexec to /bin
OLD_FILES+=libexec/freebsd-version
# 20131001: ar and ranlib from binutils not used
OLD_FILES+=usr/bin/gnu-ar
OLD_FILES+=usr/bin/gnu-ranlib
OLD_FILES+=usr/share/man/man1/gnu-ar.1.gz
OLD_FILES+=usr/share/man/man1/gnu-ranlib.1.gz
# 20130930: BIND removed from base
OLD_FILES+=etc/mtree/BIND.chroot.dist
OLD_FILES+=etc/namedb
OLD_FILES+=etc/periodic/daily/470.status-named
OLD_FILES+=usr/bin/dig
OLD_FILES+=usr/bin/nslookup
OLD_FILES+=usr/bin/nsupdate
OLD_DIRS+=usr/include/lwres
OLD_FILES+=usr/include/lwres/context.h
OLD_FILES+=usr/include/lwres/int.h
OLD_FILES+=usr/include/lwres/ipv6.h
OLD_FILES+=usr/include/lwres/lang.h
OLD_FILES+=usr/include/lwres/list.h
OLD_FILES+=usr/include/lwres/lwbuffer.h
OLD_FILES+=usr/include/lwres/lwpacket.h
OLD_FILES+=usr/include/lwres/lwres.h
OLD_FILES+=usr/include/lwres/net.h
OLD_FILES+=usr/include/lwres/netdb.h
OLD_FILES+=usr/include/lwres/platform.h
OLD_FILES+=usr/include/lwres/result.h
OLD_FILES+=usr/include/lwres/string.h
OLD_FILES+=usr/include/lwres/version.h
OLD_FILES+=usr/lib/liblwres.a
OLD_FILES+=usr/lib/liblwres.so
OLD_LIBS+=usr/lib/liblwres.so.90
OLD_FILES+=usr/lib/liblwres_p.a
OLD_FILES+=usr/sbin/arpaname
OLD_FILES+=usr/sbin/ddns-confgen
OLD_FILES+=usr/sbin/dnssec-dsfromkey
OLD_FILES+=usr/sbin/dnssec-keyfromlabel
OLD_FILES+=usr/sbin/dnssec-keygen
OLD_FILES+=usr/sbin/dnssec-revoke
OLD_FILES+=usr/sbin/dnssec-settime
OLD_FILES+=usr/sbin/dnssec-signzone
OLD_FILES+=usr/sbin/dnssec-verify
OLD_FILES+=usr/sbin/genrandom
OLD_FILES+=usr/sbin/isc-hmac-fixup
OLD_FILES+=usr/sbin/lwresd
OLD_FILES+=usr/sbin/named
OLD_FILES+=usr/sbin/named-checkconf
OLD_FILES+=usr/sbin/named-checkzone
OLD_FILES+=usr/sbin/named-compilezone
OLD_FILES+=usr/sbin/named-journalprint
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/sbin/nsec3hash
OLD_FILES+=usr/sbin/rndc
OLD_FILES+=usr/sbin/rndc-confgen
OLD_DIRS+=usr/share/doc/bind9
OLD_FILES+=usr/share/doc/bind9/CHANGES
OLD_FILES+=usr/share/doc/bind9/COPYRIGHT
OLD_FILES+=usr/share/doc/bind9/FAQ
OLD_FILES+=usr/share/doc/bind9/HISTORY
OLD_FILES+=usr/share/doc/bind9/README
OLD_DIRS+=usr/share/doc/bind9/arm
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch10.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.pdf
OLD_FILES+=usr/share/doc/bind9/arm/man.arpaname.html
OLD_FILES+=usr/share/doc/bind9/arm/man.ddns-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dig.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-dsfromkey.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keyfromlabel.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keygen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-revoke.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-settime.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-signzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-verify.html
OLD_FILES+=usr/share/doc/bind9/arm/man.genrandom.html
OLD_FILES+=usr/share/doc/bind9/arm/man.host.html
OLD_FILES+=usr/share/doc/bind9/arm/man.isc-hmac-fixup.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkconf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-journalprint.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsec3hash.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsupdate.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.conf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.html
OLD_DIRS+=usr/share/doc/bind9/misc
OLD_FILES+=usr/share/doc/bind9/misc/dnssec
OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl
OLD_FILES+=usr/share/doc/bind9/misc/ipv6
OLD_FILES+=usr/share/doc/bind9/misc/migration
OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9
OLD_FILES+=usr/share/doc/bind9/misc/options
OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance
OLD_FILES+=usr/share/doc/bind9/misc/roadmap
OLD_FILES+=usr/share/doc/bind9/misc/sdb
OLD_FILES+=usr/share/doc/bind9/misc/sort-options.pl
OLD_FILES+=usr/share/man/man1/arpaname.1.gz
OLD_FILES+=usr/share/man/man1/dig.1.gz
OLD_FILES+=usr/share/man/man1/nslookup.1.gz
OLD_FILES+=usr/share/man/man1/nsupdate.1.gz
OLD_FILES+=usr/share/man/man3/lwres.3.gz
OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz
OLD_FILES+=usr/share/man/man3/lwres_config.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz
OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz
OLD_FILES+=usr/share/man/man5/named.conf.5.gz
OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz
OLD_FILES+=usr/share/man/man8/ddns-confgen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-dsfromkey.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keyfromlabel.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-revoke.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-settime.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-verify.8.gz
OLD_FILES+=usr/share/man/man8/genrandom.8.gz
OLD_FILES+=usr/share/man/man8/isc-hmac-fixup.8.gz
OLD_FILES+=usr/share/man/man8/lwresd.8.gz
OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz
OLD_FILES+=usr/share/man/man8/named-compilezone.8.gz
OLD_FILES+=usr/share/man/man8/named-journalprint.8.gz
OLD_FILES+=usr/share/man/man8/named.8.gz
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
OLD_FILES+=usr/share/man/man8/nsec3hash.8.gz
OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz
OLD_FILES+=usr/share/man/man8/rndc.8.gz
OLD_DIRS+=var/named/dev
OLD_DIRS+=var/named/etc
OLD_DIRS+=var/named/etc/namedb
OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev
OLD_DIRS+=var/named/etc/namedb/dynamic
OLD_FILES+=var/named/etc/namedb/make-localhost
OLD_DIRS+=var/named/etc/namedb/master
OLD_FILES+=var/named/etc/namedb/master/empty.db
OLD_FILES+=var/named/etc/namedb/master/localhost-forward.db
OLD_FILES+=var/named/etc/namedb/master/localhost-reverse.db
#OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out
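# (Presumably excluded so that a locally edited named.conf is never removed
# by the delete-old target.)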
OLD_FILES+=var/named/etc/namedb/named.root
OLD_DIRS+=var/named/etc/namedb/working
OLD_DIRS+=var/named/etc/namedb/slave
OLD_DIRS+=var/named/var
OLD_DIRS+=var/named/var/dump
OLD_DIRS+=var/named/var/log
OLD_DIRS+=var/named/var/run
OLD_DIRS+=var/named/var/run/named
OLD_DIRS+=var/named/var/stats
OLD_DIRS+=var/run/named
# 20130923: example moved
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages.sh
# 20130908: libssh becomes private
OLD_FILES+=usr/lib/libssh.a
OLD_FILES+=usr/lib/libssh.so
OLD_LIBS+=usr/lib/libssh.so.5
OLD_FILES+=usr/lib/libssh_p.a
# 20130903: gnupatch is no more
OLD_FILES+=usr/bin/gnupatch
OLD_FILES+=usr/share/man/man1/gnupatch.1.gz
# 20130829: bsdpatch is now patch(1) unconditionally
OLD_FILES+=usr/bin/bsdpatch
OLD_FILES+=usr/share/man/man1/bsdpatch.1.gz
# 20130822: BIND 9.9.3-P2 import
OLD_LIBS+=usr/lib/liblwres.so.80
# 20130814: vm_page_busy(9) rework; obsolete man pages removed
OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz
# 20130710: libkvm version bump
OLD_LIBS+=lib/libkvm.so.5
# 20130623: dialog update from 1.1 to 1.2
OLD_LIBS+=usr/lib/libdialog.so.7
# 20130616: vfs_mount.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mount.9.gz
# 20130614: remove CVS from base
OLD_FILES+=usr/bin/cvs
OLD_FILES+=usr/bin/cvsbug
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ps.gz
OLD_DIRS+=usr/share/doc/psd/28.cvs
OLD_FILES+=usr/share/examples/cvs/contrib/README
OLD_FILES+=usr/share/examples/cvs/contrib/clmerge
OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist
OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep
OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor
OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man
OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man
OLD_FILES+=usr/share/examples/cvs/contrib/descend.man
OLD_FILES+=usr/share/examples/cvs/contrib/easy-import
OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc
OLD_FILES+=usr/share/examples/cvs/contrib/log
OLD_FILES+=usr/share/examples/cvs/contrib/log_accum
OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe
OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs
OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log
OLD_FILES+=usr/share/examples/cvs/contrib/rcslock
OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs
OLD_DIRS+=usr/share/examples/cvs/contrib
OLD_DIRS+=usr/share/examples/cvs
OLD_FILES+=usr/share/info/cvs.info.gz
OLD_FILES+=usr/share/info/cvsclient.info.gz
OLD_FILES+=usr/share/man/man1/cvs.1.gz
OLD_FILES+=usr/share/man/man5/cvs.5.gz
OLD_FILES+=usr/share/man/man8/cvsbug.8.gz
# 20130607: WITH_DEBUG_FILES added
OLD_FILES+=lib/libufs.so.6.symbols
# 20130417: nfs fha moved from nfsserver to nfs
OLD_FILES+=usr/include/nfsserver/nfs_fha.h
# 20130411: new clang import which bumps version from 3.2 to 3.3
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.2/altivec.h
OLD_FILES+=usr/include/clang/3.2/ammintrin.h
OLD_FILES+=usr/include/clang/3.2/avx2intrin.h
OLD_FILES+=usr/include/clang/3.2/avxintrin.h
OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.2/bmiintrin.h
OLD_FILES+=usr/include/clang/3.2/cpuid.h
OLD_FILES+=usr/include/clang/3.2/emmintrin.h
OLD_FILES+=usr/include/clang/3.2/f16cintrin.h
OLD_FILES+=usr/include/clang/3.2/fma4intrin.h
OLD_FILES+=usr/include/clang/3.2/fmaintrin.h
OLD_FILES+=usr/include/clang/3.2/immintrin.h
OLD_FILES+=usr/include/clang/3.2/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.2/mm3dnow.h
OLD_FILES+=usr/include/clang/3.2/mm_malloc.h
OLD_FILES+=usr/include/clang/3.2/mmintrin.h
OLD_FILES+=usr/include/clang/3.2/module.map
OLD_FILES+=usr/include/clang/3.2/nmmintrin.h
OLD_FILES+=usr/include/clang/3.2/pmmintrin.h
OLD_FILES+=usr/include/clang/3.2/popcntintrin.h
OLD_FILES+=usr/include/clang/3.2/rtmintrin.h
OLD_FILES+=usr/include/clang/3.2/smmintrin.h
OLD_FILES+=usr/include/clang/3.2/tmmintrin.h
OLD_FILES+=usr/include/clang/3.2/wmmintrin.h
OLD_FILES+=usr/include/clang/3.2/x86intrin.h
OLD_FILES+=usr/include/clang/3.2/xmmintrin.h
OLD_FILES+=usr/include/clang/3.2/xopintrin.h
OLD_DIRS+=usr/include/clang/3.2
# 20130404: legacy ATA stack removed
OLD_FILES+=etc/periodic/daily/405.status-ata-raid
OLD_FILES+=rescue/atacontrol
OLD_FILES+=sbin/atacontrol
OLD_FILES+=usr/share/man/man8/atacontrol.8.gz
OLD_FILES+=usr/share/man/man4/atapicam.4.gz
OLD_FILES+=usr/share/man/man4/ataraid.4.gz
OLD_FILES+=usr/sbin/burncd
OLD_FILES+=usr/share/man/man8/burncd.8.gz
# 20130316: vinum.4 removed
OLD_FILES+=usr/share/man/man4/vinum.4.gz
# 20130312: fortunes-o removed
OLD_FILES+=usr/share/games/fortune/fortunes-o
OLD_FILES+=usr/share/games/fortune/fortunes-o.dat
# 20130311: Ports are no longer available via cvsup
OLD_FILES+=usr/share/examples/cvsup/ports-supfile
OLD_FILES+=usr/share/examples/cvsup/refuse
OLD_FILES+=usr/share/examples/cvsup/refuse.README
# 20130309: NWFS and NCP support removed
OLD_FILES+=usr/bin/ncplist
OLD_FILES+=usr/bin/ncplogin
OLD_FILES+=usr/bin/ncplogout
OLD_FILES+=usr/include/fs/nwfs/nwfs.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h
OLD_DIRS+=usr/include/fs/nwfs
OLD_FILES+=usr/include/netncp/ncp.h
OLD_FILES+=usr/include/netncp/ncp_cfg.h
OLD_FILES+=usr/include/netncp/ncp_conn.h
OLD_FILES+=usr/include/netncp/ncp_file.h
OLD_FILES+=usr/include/netncp/ncp_lib.h
OLD_FILES+=usr/include/netncp/ncp_ncp.h
OLD_FILES+=usr/include/netncp/ncp_nls.h
OLD_FILES+=usr/include/netncp/ncp_rcfile.h
OLD_FILES+=usr/include/netncp/ncp_rq.h
OLD_FILES+=usr/include/netncp/ncp_sock.h
OLD_FILES+=usr/include/netncp/ncp_subr.h
OLD_FILES+=usr/include/netncp/ncp_user.h
OLD_FILES+=usr/include/netncp/ncpio.h
OLD_FILES+=usr/include/netncp/nwerror.h
OLD_DIRS+=usr/include/netncp
OLD_FILES+=usr/lib/libncp.a
OLD_FILES+=usr/lib/libncp.so
OLD_LIBS+=usr/lib/libncp.so.4
OLD_FILES+=usr/lib/libncp_p.a
OLD_FILES+=usr/sbin/mount_nwfs
OLD_FILES+=usr/share/examples/nwclient/dot.nwfsrc
OLD_FILES+=usr/share/examples/nwclient/nwfs.sh.sample
OLD_DIRS+=usr/share/examples/nwclient
OLD_FILES+=usr/share/man/man1/ncplist.1.gz
OLD_FILES+=usr/share/man/man1/ncplogin.1.gz
OLD_FILES+=usr/share/man/man1/ncplogout.1.gz
OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz
# 20130302: NTFS support removed
OLD_FILES+=rescue/mount_ntfs
OLD_FILES+=sbin/mount_ntfs
OLD_FILES+=usr/include/fs/ntfs/ntfs.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/fs/ntfs/ntfsmount.h
OLD_DIRS+=usr/include/fs/ntfs
OLD_FILES+=usr/share/man/man8/mount_ntfs.8.gz
# 20130302: PORTALFS support removed
OLD_FILES+=usr/include/fs/portalfs/portal.h
OLD_DIRS+=usr/include/fs/portalfs
OLD_FILES+=usr/sbin/mount_portalfs
OLD_FILES+=usr/share/examples/portal/README
OLD_FILES+=usr/share/examples/portal/portal.conf
OLD_DIRS+=usr/share/examples/portal
OLD_FILES+=usr/share/man/man8/mount_portalfs.8.gz
# 20130302: CODAFS support removed
OLD_FILES+=usr/share/man/man4/coda.4.gz
# 20130302: XFS support removed
OLD_FILES+=usr/share/man/man5/xfs.5.gz
# 20130302: Capsicum overhaul
OLD_FILES+=usr/share/man/man2/cap_getrights.2.gz
OLD_FILES+=usr/share/man/man2/cap_new.2.gz
# 20130213: OpenSSL 1.0.1e import
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover_init.3.gz
# 20130116: removed long-unused directories for .1aout section manpages
OLD_FILES+=usr/share/man/en.ISO8859-1/man1aout
OLD_FILES+=usr/share/man/en.UTF-8/man1aout
OLD_DIRS+=usr/share/man/man1aout
OLD_DIRS+=usr/share/man/cat1aout
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1aout
OLD_DIRS+=usr/share/man/en.UTF-8/cat1aout
# 20130103: gnats-supfile removed
OLD_FILES+=usr/share/examples/cvsup/gnats-supfile
# 20121230: libdisk removed
OLD_FILES+=usr/share/man/man3/libdisk.3.gz
OLD_FILES+=usr/include/libdisk.h
OLD_FILES+=usr/lib/libdisk.a
# 20121230: remove wrongly created directories for auditdistd
OLD_DIRS+=var/dist
OLD_DIRS+=var/remote
# 20121022: remove harp, hfa and idt man pages
OLD_FILES+=usr/share/man/man4/harp.4.gz
OLD_FILES+=usr/share/man/man4/hfa.4.gz
OLD_FILES+=usr/share/man/man4/idt.4.gz
OLD_FILES+=usr/share/man/man4/if_idt.4.gz
# 20121022: VFS_LOCK_GIANT elimination
OLD_FILES+=usr/share/man/man9/VFS_LOCK_GIANT.9.gz
OLD_FILES+=usr/share/man/man9/VFS_UNLOCK_GIANT.9.gz
# 20121004: remove incomplete unwind.h
OLD_FILES+=usr/include/clang/3.2/unwind.h
# 20120910: NetBSD compat shims removed
OLD_FILES+=usr/include/cam/scsi/scsi_low_pisa.h
OLD_FILES+=usr/include/sys/device_port.h
# 20120909: doc and www supfiles removed
OLD_FILES+=usr/share/examples/cvsup/doc-supfile
OLD_FILES+=usr/share/examples/cvsup/www-supfile
# 20120908: pf cleanup
OLD_FILES+=usr/include/net/if_pflow.h
# 20120816: new clang import which bumps version from 3.1 to 3.2
OLD_FILES+=usr/bin/llvm-ld
OLD_FILES+=usr/bin/llvm-stub
OLD_FILES+=usr/include/clang/3.1/altivec.h
OLD_FILES+=usr/include/clang/3.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.1/cpuid.h
OLD_FILES+=usr/include/clang/3.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.1/immintrin.h
OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.1/module.map
OLD_FILES+=usr/include/clang/3.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.1/unwind.h
OLD_FILES+=usr/include/clang/3.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.1/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.1
OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz
# 20120712: OpenSSL 1.0.1c import
OLD_LIBS+=lib/libcrypto.so.6
OLD_LIBS+=usr/lib/libssl.so.6
OLD_FILES+=usr/include/openssl/aes_locl.h
OLD_FILES+=usr/include/openssl/bio_lcl.h
OLD_FILES+=usr/include/openssl/e_os.h
OLD_FILES+=usr/include/openssl/fips.h
OLD_FILES+=usr/include/openssl/fips_rand.h
OLD_FILES+=usr/include/openssl/pq_compat.h
OLD_FILES+=usr/include/openssl/tmdiff.h
OLD_FILES+=usr/include/openssl/ui_locl.h
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_id_callback.3.gz
# 20120621: remove old man page
OLD_FILES+=usr/share/man/man8/vnconfig.8.gz
# 20120619: TOE support updated
OLD_FILES+=usr/include/netinet/toedev.h
# 20120613: auth.conf removed
OLD_FILES+=etc/auth.conf
OLD_FILES+=usr/share/examples/etc/auth.conf
OLD_FILES+=usr/share/man/man3/auth.3.gz
OLD_FILES+=usr/share/man/man3/auth_getval.3.gz
OLD_FILES+=usr/share/man/man5/auth.conf.5.gz
# 20120530: the kde pam.d file now lives in ports
OLD_FILES+=etc/pam.d/kde
# 20120521: byacc import
OLD_FILES+=usr/bin/yyfix
OLD_FILES+=usr/share/man/man1/yyfix.1.gz
# 20120505: new clang import installed a redundant internal header
OLD_FILES+=usr/include/clang/3.1/stdalign.h
# 20120428: MD2 removed from libmd
OLD_LIBS+=lib/libmd.so.5
OLD_FILES+=usr/include/md2.h
OLD_FILES+=usr/share/man/man3/MD2Data.3.gz
OLD_FILES+=usr/share/man/man3/MD2End.3.gz
OLD_FILES+=usr/share/man/man3/MD2File.3.gz
OLD_FILES+=usr/share/man/man3/MD2FileChunk.3.gz
OLD_FILES+=usr/share/man/man3/MD2Final.3.gz
OLD_FILES+=usr/share/man/man3/MD2Init.3.gz
OLD_FILES+=usr/share/man/man3/MD2Update.3.gz
OLD_FILES+=usr/share/man/man3/md2.3.gz
# 20120425: libusb version bump (r234684)
OLD_LIBS+=usr/lib/libusb.so.2
OLD_FILES+=usr/share/man/man3/libsub_get_active_config_descriptor.3.gz
# 20120415: new clang import which bumps version from 3.0 to 3.1
OLD_FILES+=usr/include/clang/3.0/altivec.h
OLD_FILES+=usr/include/clang/3.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.0/immintrin.h
OLD_FILES+=usr/include/clang/3.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.0
# 20120412: BIND 9.8.1 release notes removed
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.pdf
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.txt
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.html
OLD_FILES+=usr/share/doc/bind9/release-notes.css
# 20120330: legacy(4) moved to x86
OLD_FILES+=usr/include/machine/legacyvar.h
# 20120324: MPI headers updated
OLD_FILES+=usr/include/dev/mpt/mpilib/mpi_inb.h
# 20120322: hwpmc_mips24k.h removed
OLD_FILES+=usr/include/dev/hwpmc/hwpmc_mips24k.h
# 20120322: Update heimdal to 1.5.1
OLD_FILES+=usr/include/krb5-v4compat.h \
usr/include/krb_err.h \
usr/include/hdb-private.h \
usr/share/man/man3/krb5_addresses.3.gz \
usr/share/man/man3/krb5_cc_cursor.3.gz \
usr/share/man/man3/krb5_cc_ops.3.gz \
usr/share/man/man3/krb5_config.3.gz \
usr/share/man/man3/krb5_config_get_int_default.3.gz \
usr/share/man/man3/krb5_context.3.gz \
usr/share/man/man3/krb5_data.3.gz \
usr/share/man/man3/krb5_err.3.gz \
usr/share/man/man3/krb5_errx.3.gz \
usr/share/man/man3/krb5_keyblock.3.gz \
usr/share/man/man3/krb5_keytab_entry.3.gz \
usr/share/man/man3/krb5_kt_cursor.3.gz \
usr/share/man/man3/krb5_kt_ops.3.gz \
usr/share/man/man3/krb5_set_warn_dest.3.gz \
usr/share/man/man3/krb5_verr.3.gz \
usr/share/man/man3/krb5_verrx.3.gz \
usr/share/man/man3/krb5_vwarnx.3.gz \
usr/share/man/man3/krb5_warn.3.gz \
usr/share/man/man3/krb5_warnx.3.gz
OLD_LIBS+=usr/lib/libasn1.so.10 \
usr/lib/libhdb.so.10 \
usr/lib/libheimntlm.so.10 \
usr/lib/libhx509.so.10 \
usr/lib/libkadm5clnt.so.10 \
usr/lib/libkadm5srv.so.10 \
usr/lib/libkafs5.so.10 \
usr/lib/libkrb5.so.10 \
usr/lib/libroken.so.10
# 20120309: Remove fifofs header files
OLD_FILES+=usr/include/fs/fifofs/fifo.h
OLD_DIRS+=usr/include/fs/fifofs
# 20120304: xlocale cleanup
OLD_FILES+=usr/include/_xlocale_ctype.h
# 20120225: libarchive 3.0.3
OLD_FILES+=usr/share/man/man3/archive_read_data_into_buffer.3.gz \
usr/share/man/man3/archive_read_support_compression_all.3.gz \
usr/share/man/man3/archive_read_support_compression_bzip2.3.gz \
usr/share/man/man3/archive_read_support_compression_compress.3.gz \
usr/share/man/man3/archive_read_support_compression_gzip.3.gz \
usr/share/man/man3/archive_read_support_compression_lzma.3.gz \
usr/share/man/man3/archive_read_support_compression_none.3.gz \
usr/share/man/man3/archive_read_support_compression_program.3.gz \
usr/share/man/man3/archive_read_support_compression_program_signature.3.gz \
usr/share/man/man3/archive_read_support_compression_xz.3.gz \
usr/share/man/man3/archive_write_set_callbacks.3.gz \
usr/share/man/man3/archive_write_set_compression_bzip2.3.gz \
usr/share/man/man3/archive_write_set_compression_compress.3.gz \
usr/share/man/man3/archive_write_set_compression_gzip.3.gz \
usr/share/man/man3/archive_write_set_compression_none.3.gz \
usr/share/man/man3/archive_write_set_compression_program.3.gz
OLD_LIBS+=usr/lib/libarchive.so.5
# 20120113: removal of wtmpcvt(1)
OLD_FILES+=usr/bin/wtmpcvt
OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz
# 20111214: eventtimers(7) moved to eventtimers(4)
OLD_FILES+=usr/share/man/man7/eventtimers.7.gz
# 20111125: amd(4) removed
OLD_FILES+=usr/share/man/man4/amd.4.gz
# 20111125: libodialog removed
OLD_FILES+=usr/lib/libodialog.a
OLD_FILES+=usr/lib/libodialog.so
OLD_LIBS+=usr/lib/libodialog.so.7
OLD_FILES+=usr/lib/libodialog_p.a
# 20110930: sysinstall removed
OLD_FILES+=usr/sbin/sysinstall
OLD_FILES+=usr/share/man/man8/sysinstall.8.gz
OLD_FILES+=usr/lib/libftpio.a
OLD_FILES+=usr/lib/libftpio.so
OLD_LIBS+=usr/lib/libftpio.so.8
OLD_FILES+=usr/lib/libftpio_p.a
OLD_FILES+=usr/include/ftpio.h
OLD_FILES+=usr/share/man/man3/ftpio.3.gz
# 20110915: rename congestion control manpages
OLD_FILES+=usr/share/man/man9/cc.9.gz
# 20110831: atomic page flags operations
OLD_FILES+=usr/share/man/man9/vm_page_flag.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_flag_clear.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_flag_set.9.gz
# 20110828: library version bump for 9.0
OLD_LIBS+=lib/libcam.so.5
OLD_LIBS+=lib/libpcap.so.7
OLD_LIBS+=lib/libufs.so.5
OLD_LIBS+=usr/lib/libbsnmp.so.5
OLD_LIBS+=usr/lib/libdwarf.so.2
OLD_LIBS+=usr/lib/libopie.so.6
OLD_LIBS+=usr/lib/librtld_db.so.1
OLD_LIBS+=usr/lib/libtacplus.so.4
# 20110817: no more acd.4, ad.4, afd.4 and ast.4
OLD_FILES+=usr/share/man/man4/acd.4.gz
OLD_FILES+=usr/share/man/man4/ad.4.gz
OLD_FILES+=usr/share/man/man4/afd.4.gz
OLD_FILES+=usr/share/man/man4/ast.4.gz
# 20110718: no longer useful in the age of rc.d
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
# 20110716: bind 9.8.0 import
OLD_LIBS+=usr/lib/liblwres.so.50
OLD_FILES+=usr/share/doc/bind9/KNOWN-DEFECTS
OLD_FILES+=usr/share/doc/bind9/NSEC3-NOTES
OLD_FILES+=usr/share/doc/bind9/README.idnkit
OLD_FILES+=usr/share/doc/bind9/README.pkcs11
# 20110709: vm_map_clean.9 -> vm_map_sync.9
OLD_FILES+=usr/share/man/man9/vm_map_clean.9.gz
# 20110709: Catch up with removal of these functions
OLD_FILES+=usr/share/man/man9/vm_page_copy.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_protect.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_zero_fill.9.gz
# 20110707: script no longer needed by /etc/rc.d/nfsd
OLD_FILES+=etc/rc.d/nfsserver
# 20110705: files moved so both NFS clients can share them
OLD_FILES+=usr/include/nfsclient/krpc.h
OLD_FILES+=usr/include/nfsclient/nfsdiskless.h
# 20110705: switch the default NFS client to the new implementation
OLD_FILES+=sbin/mount_newnfs
OLD_FILES+=usr/share/man/man8/mount_newnfs.8.gz
OLD_FILES+=usr/include/nfsclient/nfs_kdtrace.h
# 20110628: calendar.msk removed
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk
# 20110517: libpkg removed
OLD_FILES+=usr/include/pkg.h
OLD_FILES+=usr/lib/libpkg.a
OLD_FILES+=usr/lib/libpkg.so
OLD_LIBS+=usr/lib/libpkg.so.0
OLD_FILES+=usr/lib/libpkg_p.a
# 20110517: libsbuf version bump
OLD_LIBS+=lib/libsbuf.so.5
# 20110502: new clang import which bumps version from 2.9 to 3.0
OLD_FILES+=usr/include/clang/2.9/emmintrin.h
OLD_FILES+=usr/include/clang/2.9/mm_malloc.h
OLD_FILES+=usr/include/clang/2.9/mmintrin.h
OLD_FILES+=usr/include/clang/2.9/pmmintrin.h
OLD_FILES+=usr/include/clang/2.9/tmmintrin.h
OLD_FILES+=usr/include/clang/2.9/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.9
# 20110417: removal of Objective-C support
OLD_FILES+=usr/include/objc/encoding.h
OLD_FILES+=usr/include/objc/hash.h
OLD_FILES+=usr/include/objc/NXConstStr.h
OLD_FILES+=usr/include/objc/objc-api.h
OLD_FILES+=usr/include/objc/objc-decls.h
OLD_FILES+=usr/include/objc/objc-list.h
OLD_FILES+=usr/include/objc/objc.h
OLD_FILES+=usr/include/objc/Object.h
OLD_FILES+=usr/include/objc/Protocol.h
OLD_FILES+=usr/include/objc/runtime.h
OLD_FILES+=usr/include/objc/sarray.h
OLD_FILES+=usr/include/objc/thr.h
OLD_FILES+=usr/include/objc/typedstream.h
OLD_FILES+=usr/lib/libobjc.a
OLD_FILES+=usr/lib/libobjc.so
OLD_FILES+=usr/lib/libobjc_p.a
OLD_FILES+=usr/libexec/cc1obj
OLD_LIBS+=usr/lib/libobjc.so.4
OLD_DIRS+=usr/include/objc
# 20110331: firmware.img created at build time
OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img
# 20110224: sticky.8 -> sticky.7
OLD_FILES+=usr/share/man/man8/sticky.8.gz
# 20110220: new clang import which bumps version from 2.8 to 2.9
OLD_FILES+=usr/include/clang/2.8/emmintrin.h
OLD_FILES+=usr/include/clang/2.8/mm_malloc.h
OLD_FILES+=usr/include/clang/2.8/mmintrin.h
OLD_FILES+=usr/include/clang/2.8/pmmintrin.h
OLD_FILES+=usr/include/clang/2.8/tmmintrin.h
OLD_FILES+=usr/include/clang/2.8/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.8
# 20110119: netinet/sctp_cc_functions.h removed
OLD_FILES+=usr/include/netinet/sctp_cc_functions.h
# 20110119: Remove SYSCTL_*X* sysctl additions
OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \
usr/share/man/man9/SYSCTL_XLONG.9.gz
# 20110112: update dialog to a new version, rename the old libdialog to
# libodialog, and remove the associated man pages and header files.
OLD_FILES+=usr/share/man/man3/draw_shadow.3.gz \
usr/share/man/man3/draw_box.3.gz usr/share/man/man3/line_edit.3.gz \
usr/share/man/man3/strheight.3.gz usr/share/man/man3/strwidth.3.gz \
usr/share/man/man3/dialog_create_rc.3.gz \
usr/share/man/man3/dialog_yesno.3.gz usr/share/man/man3/dialog_noyes.3.gz \
usr/share/man/man3/dialog_prgbox.3.gz \
usr/share/man/man3/dialog_textbox.3.gz usr/share/man/man3/dialog_menu.3.gz \
usr/share/man/man3/dialog_checklist.3.gz \
usr/share/man/man3/dialog_radiolist.3.gz \
usr/share/man/man3/dialog_inputbox.3.gz \
usr/share/man/man3/dialog_clear_norefresh.3.gz \
usr/share/man/man3/dialog_clear.3.gz usr/share/man/man3/dialog_update.3.gz \
usr/share/man/man3/dialog_fselect.3.gz \
usr/share/man/man3/dialog_notify.3.gz \
usr/share/man/man3/dialog_mesgbox.3.gz \
usr/share/man/man3/dialog_gauge.3.gz usr/share/man/man3/init_dialog.3.gz \
usr/share/man/man3/end_dialog.3.gz usr/share/man/man3/use_helpfile.3.gz \
usr/share/man/man3/use_helpline.3.gz usr/share/man/man3/get_helpline.3.gz \
usr/share/man/man3/restore_helpline.3.gz \
usr/share/man/man3/dialog_msgbox.3.gz \
usr/share/man/man3/dialog_ftree.3.gz usr/share/man/man3/dialog_tree.3.gz \
usr/share/examples/dialog/README usr/share/examples/dialog/checklist \
usr/share/examples/dialog/ftreebox usr/share/examples/dialog/infobox \
usr/share/examples/dialog/inputbox usr/share/examples/dialog/menubox \
usr/share/examples/dialog/msgbox usr/share/examples/dialog/prgbox \
usr/share/examples/dialog/radiolist usr/share/examples/dialog/textbox \
usr/share/examples/dialog/treebox usr/share/examples/dialog/yesno \
usr/share/examples/libdialog/Makefile usr/share/examples/libdialog/check1.c\
usr/share/examples/libdialog/check2.c usr/share/examples/libdialog/check3.c\
usr/share/examples/libdialog/dselect.c \
usr/share/examples/libdialog/fselect.c \
usr/share/examples/libdialog/ftree1.c \
usr/share/examples/libdialog/ftree1.test \
usr/share/examples/libdialog/ftree2.c \
usr/share/examples/libdialog/ftree2.test \
usr/share/examples/libdialog/gauge.c usr/share/examples/libdialog/input1.c \
usr/share/examples/libdialog/input2.c usr/share/examples/libdialog/menu1.c \
usr/share/examples/libdialog/menu2.c usr/share/examples/libdialog/menu3.c \
usr/share/examples/libdialog/msg.c usr/share/examples/libdialog/prgbox.c \
usr/share/examples/libdialog/radio1.c usr/share/examples/libdialog/radio2.c\
usr/share/examples/libdialog/radio3.c usr/share/examples/libdialog/text.c \
usr/share/examples/libdialog/tree.c usr/share/examples/libdialog/yesno.c
OLD_DIRS+=usr/share/examples/libdialog usr/share/examples/dialog
# 20101114: Remove long-obsolete MAKEDEV.8
OLD_FILES+=usr/share/man/man8/MAKEDEV.8.gz
# 20101112: vgonel(9) became a private API a while ago
OLD_FILES+=usr/share/man/man9/vgonel.9.gz
# 20101112: removed gasp.info
OLD_FILES+=usr/share/info/gasp.info.gz
# 20101109: machine/mutex.h removed
OLD_FILES+=usr/include/machine/mutex.h
# 20101109: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/mptable.h
.endif
# 20101101: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/apicreg.h
OLD_FILES+=usr/include/machine/mca.h
.endif
# 20101020: catch up with vm_page_sleep_if_busy rename
OLD_FILES+=usr/share/man/man9/vm_page_sleep_busy.9.gz
# 20101018: taskqueue(9) updates
OLD_FILES+=usr/share/man/man9/taskqueue_find.9.gz
# 20101011: removed subblock.h from liblzma
OLD_FILES+=usr/include/lzma/subblock.h
# 20101002: removed manpath.config
OLD_FILES+=etc/manpath.config
OLD_FILES+=usr/share/examples/etc/manpath.config
# 20100910: renamed sbuf_overflowed to sbuf_error
OLD_FILES+=usr/share/man/man9/sbuf_overflowed.9.gz
# 20100815: retired last traces of chooseproc(9)
OLD_FILES+=usr/share/man/man9/chooseproc.9.gz
# 20100806: removal of unused libcompat routines
OLD_FILES+=usr/share/man/man3/ascftime.3.gz
OLD_FILES+=usr/share/man/man3/cfree.3.gz
OLD_FILES+=usr/share/man/man3/cftime.3.gz
OLD_FILES+=usr/share/man/man3/getpw.3.gz
# 20100725: acpi_aiboost(4) removal
OLD_FILES+=usr/share/man/man4/acpi_aiboost.4.gz
# 20100724: nfsclient/nfs_lock.h moved to nfs/nfs_lock.h
OLD_FILES+=usr/include/nfsclient/nfs_lock.h
# 20100720: new clang import which bumps version from 2.0 to 2.8
OLD_FILES+=usr/include/clang/2.0/emmintrin.h
OLD_FILES+=usr/include/clang/2.0/mm_malloc.h
OLD_FILES+=usr/include/clang/2.0/mmintrin.h
OLD_FILES+=usr/include/clang/2.0/pmmintrin.h
OLD_FILES+=usr/include/clang/2.0/tmmintrin.h
OLD_FILES+=usr/include/clang/2.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.0
# 20100706: removed pc-sysinstall's detect-vmware.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh
# 20100701: [powerpc] removed <machine/intr.h>
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/intr.h
.endif
# 20100514: library version bump to add versioned symbols to liblzma
OLD_LIBS+=usr/lib/liblzma.so.0
# 20100511: move GCC-specific headers to /usr/include/gcc
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/emmintrin.h
OLD_FILES+=usr/include/mm_malloc.h
OLD_FILES+=usr/include/pmmintrin.h
OLD_FILES+=usr/include/xmmintrin.h
.endif
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" || ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/mmintrin.h
.endif
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/altivec.h
OLD_FILES+=usr/include/ppc-asm.h
OLD_FILES+=usr/include/spe.h
.endif
# 20100416: [mips] removed <machine/psl.h>
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/psl.h
.endif
# 20100415: [mips] removed unused headers
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/archtype.h
OLD_FILES+=usr/include/machine/segments.h
OLD_FILES+=usr/include/machine/rm7000.h
OLD_FILES+=usr/include/machine/defs.h
OLD_FILES+=usr/include/machine/queue.h
.endif
# 20100326: gcpio removal
OLD_FILES+=usr/bin/gcpio
OLD_FILES+=usr/share/info/cpio.info.gz
OLD_FILES+=usr/share/man/man1/gcpio.1.gz
# 20100322: libz update
OLD_LIBS+=lib/libz.so.5
# 20100314: removal of regexp.h
OLD_FILES+=usr/include/regexp.h
OLD_FILES+=usr/share/man/man3/regexp.3.gz
OLD_FILES+=usr/share/man/man3/regsub.3.gz
# 20100303: actual removal of utmp.h
OLD_FILES+=usr/include/utmp.h
# 20100208: man pages moved
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/alpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/amdpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/mcd.4.gz
OLD_FILES+=usr/share/man/man4/i386/padlock.4.gz
OLD_FILES+=usr/share/man/man4/i386/pcf.4.gz
OLD_FILES+=usr/share/man/man4/i386/scd.4.gz
OLD_FILES+=usr/share/man/man4/i386/viapm.4.gz
.endif
# 20100122: move BSDL bc/dc USD documents to /usr/share/doc/usd
OLD_FILES+=usr/share/doc/papers/bc.ascii.gz
OLD_FILES+=usr/share/doc/papers/dc.ascii.gz
# 20100120: replacing GNU bc/dc with BSDL versions
OLD_FILES+=usr/share/examples/bc/ckbook.b
OLD_FILES+=usr/share/examples/bc/pi.b
OLD_FILES+=usr/share/examples/bc/primes.b
OLD_FILES+=usr/share/examples/bc/twins.b
OLD_FILES+=usr/share/info/dc.info.gz
OLD_DIRS+=usr/share/examples/bc
# 20100114: removal of ttyslot(3)
OLD_FILES+=usr/share/man/man3/ttyslot.3.gz
# 20100113: remove utmp.h, replace it with utmpx.h
OLD_FILES+=usr/share/man/man3/login.3.gz
OLD_FILES+=usr/share/man/man3/logout.3.gz
OLD_FILES+=usr/share/man/man3/logwtmp.3.gz
OLD_FILES+=usr/share/man/man3/ulog_endutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxline.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxuser.3.gz
OLD_FILES+=usr/share/man/man3/ulog_pututxline.3.gz
OLD_FILES+=usr/share/man/man3/ulog_setutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_setutxfile.3.gz
OLD_FILES+=usr/share/man/man5/lastlog.5.gz
OLD_FILES+=usr/share/man/man5/utmp.5.gz
OLD_FILES+=usr/share/man/man5/wtmp.5.gz
OLD_LIBS+=lib/libutil.so.8
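# Note: <utmpx.h> provides the replacement interfaces; a minimal C sketch
# of walking the active-session database with the standard utmpx calls:
#   struct utmpx *ut;
#   setutxent();
#   while ((ut = getutxent()) != NULL)
#       if (ut->ut_type == USER_PROCESS)
#           printf("%s %s\n", ut->ut_user, ut->ut_line);
#   endutxent();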
# 20100105: new userland semaphore implementation
OLD_FILES+=usr/include/sys/semaphore.h
# 20100103: ntptrace(8) removed
OLD_FILES+=usr/sbin/ntptrace
OLD_FILES+=usr/share/man/man8/ntptrace.8.gz
# 20091229: remove no longer relevant examples
OLD_FILES+=usr/share/examples/pppd/auth-down.sample
OLD_FILES+=usr/share/examples/pppd/auth-up.sample
OLD_FILES+=usr/share/examples/pppd/chap-secrets.sample
OLD_FILES+=usr/share/examples/pppd/chat.sh.sample
OLD_FILES+=usr/share/examples/pppd/ip-down.sample
OLD_FILES+=usr/share/examples/pppd/ip-up.sample
OLD_FILES+=usr/share/examples/pppd/options.sample
OLD_FILES+=usr/share/examples/pppd/pap-secrets.sample
OLD_FILES+=usr/share/examples/pppd/ppp.deny.sample
OLD_FILES+=usr/share/examples/pppd/ppp.shells.sample
OLD_DIRS+=usr/share/examples/pppd
OLD_FILES+=usr/share/examples/slattach/unit-command.sh
OLD_DIRS+=usr/share/examples/slattach
OLD_FILES+=usr/share/examples/sliplogin/slip.hosts
OLD_FILES+=usr/share/examples/sliplogin/slip.login
OLD_FILES+=usr/share/examples/sliplogin/slip.logout
OLD_FILES+=usr/share/examples/sliplogin/slip.slparms
OLD_DIRS+=usr/share/examples/sliplogin
OLD_FILES+=usr/share/examples/startslip/sldown.sh
OLD_FILES+=usr/share/examples/startslip/slip.sh
OLD_FILES+=usr/share/examples/startslip/slup.sh
OLD_DIRS+=usr/share/examples/startslip
# 20091202: unify rc.firewall and rc.firewall6
OLD_FILES+=etc/rc.d/ip6fw
OLD_FILES+=etc/rc.firewall6
OLD_FILES+=usr/share/examples/etc/rc.firewall6
# 20091117: removal of rc.early(8) link
OLD_FILES+=usr/share/man/man8/rc.early.8.gz
# 20091027: pselect.3 implemented as syscall
OLD_FILES+=usr/share/man/man3/pselect.3.gz
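# Note: pselect(2) installs the given signal mask and waits atomically,
# closing the race a select(2)+sigprocmask(2) pair leaves open; a minimal
# C sketch (rfds/maxfd prepared with FD_ZERO/FD_SET as usual):
#   sigset_t empty;
#   sigemptyset(&empty);
#   struct timespec ts = { 1, 0 };
#   int n = pselect(maxfd + 1, &rfds, NULL, NULL, &ts, &empty);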
# 20091005: fusword.9 and susword.9 removed
OLD_FILES+=usr/share/man/man9/fusword.9.gz
OLD_FILES+=usr/share/man/man9/susword.9.gz
# 20090909: vesa and dpms promoted to be i386/amd64 common
OLD_FILES+=usr/include/machine/pc/vesa.h
OLD_FILES+=usr/share/man/man4/i386/dpms.4.gz
# 20090904: remove lukemftpd
OLD_FILES+=usr/libexec/lukemftpd
OLD_FILES+=usr/share/man/man5/ftpd.conf.5.gz
OLD_FILES+=usr/share/man/man5/ftpusers.5.gz
OLD_FILES+=usr/share/man/man8/lukemftpd.8.gz
# 20090902: BSD.{x11,x11-4}.dist are dead and BSD.local.dist lives in ports/
OLD_FILES+=etc/mtree/BSD.local.dist
OLD_FILES+=etc/mtree/BSD.x11.dist
OLD_FILES+=etc/mtree/BSD.x11-4.dist
# 20090812: net80211 documentation overhaul
OLD_FILES+=usr/share/man/man9/ieee80211_add_rates.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_add_xrates.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_alloc_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_begin_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_cfgget.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_cfgset.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_chan2ieee.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_chan2mode.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_create_ibss.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_crypto_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_crypto_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_decap.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_dump_pkt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_dup_bss.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_encap.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_end_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_find_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_fix_rate.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_free_allnodes.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_ieee2mhz.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_ioctl.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_lookup_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media2rate.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_change.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_init.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_status.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_mhz2ieee.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_next_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_lateattach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_print_essid.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_proto_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_proto_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_rate2media.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_recv_mgmt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_send_mgmt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_setmode.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_timeout_nodes.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_watchdog.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_wep_crypt.9.gz
# 20090801: vimage.h removed in favour of vnet.h
OLD_FILES+=usr/include/sys/vimage.h
# 20101208: libbsnmp was moved to usr/lib
MOVED_LIBS+=lib/libbsnmp.so.5
# 20090719: library version bump for 8.0
OLD_LIBS+=lib/libalias.so.6
OLD_LIBS+=lib/libavl.so.1
OLD_LIBS+=lib/libbegemot.so.3
OLD_LIBS+=lib/libbsdxml.so.3
OLD_LIBS+=lib/libbsnmp.so.4
OLD_LIBS+=lib/libcam.so.4
OLD_LIBS+=lib/libcrypt.so.4
OLD_LIBS+=lib/libcrypto.so.5
OLD_LIBS+=lib/libctf.so.1
OLD_LIBS+=lib/libdevstat.so.6
OLD_LIBS+=lib/libdtrace.so.1
OLD_LIBS+=lib/libedit.so.6
OLD_LIBS+=lib/libgeom.so.4
OLD_LIBS+=lib/libipsec.so.3
OLD_LIBS+=lib/libipx.so.4
OLD_LIBS+=lib/libkiconv.so.3
OLD_LIBS+=lib/libkvm.so.4
OLD_LIBS+=lib/libmd.so.4
OLD_LIBS+=lib/libncurses.so.7
OLD_LIBS+=lib/libncursesw.so.7
OLD_LIBS+=lib/libnvpair.so.1
OLD_LIBS+=lib/libpcap.so.6
OLD_LIBS+=lib/libreadline.so.7
OLD_LIBS+=lib/libsbuf.so.4
OLD_LIBS+=lib/libufs.so.4
OLD_LIBS+=lib/libumem.so.1
OLD_LIBS+=lib/libutil.so.7
OLD_LIBS+=lib/libuutil.so.1
OLD_LIBS+=lib/libz.so.4
OLD_LIBS+=lib/libzfs.so.1
OLD_LIBS+=lib/libzpool.so.1
OLD_LIBS+=usr/lib/libarchive.so.4
OLD_LIBS+=usr/lib/libauditd.so.4
OLD_LIBS+=usr/lib/libbluetooth.so.3
OLD_LIBS+=usr/lib/libbsm.so.2
OLD_LIBS+=usr/lib/libbz2.so.3
OLD_LIBS+=usr/lib/libcalendar.so.4
OLD_LIBS+=usr/lib/libcom_err.so.4
OLD_LIBS+=usr/lib/libdevinfo.so.4
OLD_LIBS+=usr/lib/libdialog.so.6
OLD_LIBS+=usr/lib/libdwarf.so.1
OLD_LIBS+=usr/lib/libfetch.so.5
OLD_LIBS+=usr/lib/libform.so.4
OLD_LIBS+=usr/lib/libformw.so.4
OLD_LIBS+=usr/lib/libftpio.so.7
OLD_LIBS+=usr/lib/libgnuregex.so.4
OLD_LIBS+=usr/lib/libgpib.so.2
OLD_LIBS+=usr/lib/libhistory.so.7
OLD_LIBS+=usr/lib/libmagic.so.3
OLD_LIBS+=usr/lib/libmemstat.so.2
OLD_LIBS+=usr/lib/libmenu.so.4
OLD_LIBS+=usr/lib/libmenuw.so.4
OLD_LIBS+=usr/lib/libmilter.so.4
OLD_LIBS+=usr/lib/libncp.so.3
OLD_LIBS+=usr/lib/libnetgraph.so.3
OLD_LIBS+=usr/lib/libngatm.so.3
OLD_LIBS+=usr/lib/libobjc.so.3
OLD_LIBS+=usr/lib/libopie.so.5
OLD_LIBS+=usr/lib/libpam.so.4
OLD_LIBS+=usr/lib/libpanel.so.4
OLD_LIBS+=usr/lib/libpanelw.so.4
OLD_LIBS+=usr/lib/libpmc.so.4
OLD_LIBS+=usr/lib/libproc.so.1
OLD_LIBS+=usr/lib/libradius.so.3
OLD_LIBS+=usr/lib/librpcsvc.so.4
OLD_LIBS+=usr/lib/libsdp.so.3
OLD_LIBS+=usr/lib/libsmb.so.3
OLD_LIBS+=usr/lib/libssh.so.4
OLD_LIBS+=usr/lib/libssl.so.5
OLD_LIBS+=usr/lib/libtacplus.so.3
OLD_LIBS+=usr/lib/libugidfw.so.3
OLD_LIBS+=usr/lib/libusb.so.1
OLD_LIBS+=usr/lib/libusbhid.so.3
OLD_LIBS+=usr/lib/libvgl.so.5
OLD_LIBS+=usr/lib/libwrap.so.5
OLD_LIBS+=usr/lib/libypclnt.so.3
OLD_LIBS+=usr/lib/pam_chroot.so.4
OLD_LIBS+=usr/lib/pam_deny.so.4
OLD_LIBS+=usr/lib/pam_echo.so.4
OLD_LIBS+=usr/lib/pam_exec.so.4
OLD_LIBS+=usr/lib/pam_ftpusers.so.4
OLD_LIBS+=usr/lib/pam_group.so.4
OLD_LIBS+=usr/lib/pam_guest.so.4
OLD_LIBS+=usr/lib/pam_krb5.so.4
OLD_LIBS+=usr/lib/pam_ksu.so.4
OLD_LIBS+=usr/lib/pam_lastlog.so.4
OLD_LIBS+=usr/lib/pam_login_access.so.4
OLD_LIBS+=usr/lib/pam_nologin.so.4
OLD_LIBS+=usr/lib/pam_opie.so.4
OLD_LIBS+=usr/lib/pam_opieaccess.so.4
OLD_LIBS+=usr/lib/pam_passwdqc.so.4
OLD_LIBS+=usr/lib/pam_permit.so.4
OLD_LIBS+=usr/lib/pam_radius.so.4
OLD_LIBS+=usr/lib/pam_rhosts.so.4
OLD_LIBS+=usr/lib/pam_rootok.so.4
OLD_LIBS+=usr/lib/pam_securetty.so.4
OLD_LIBS+=usr/lib/pam_self.so.4
OLD_LIBS+=usr/lib/pam_ssh.so.4
OLD_LIBS+=usr/lib/pam_tacplus.so.4
OLD_LIBS+=usr/lib/pam_unix.so.4
OLD_LIBS+=usr/lib/snmp_atm.so.5
OLD_LIBS+=usr/lib/snmp_bridge.so.5
OLD_LIBS+=usr/lib/snmp_hostres.so.5
OLD_LIBS+=usr/lib/snmp_mibII.so.5
OLD_LIBS+=usr/lib/snmp_netgraph.so.5
OLD_LIBS+=usr/lib/snmp_pf.so.5
# 20090718: the gdm pam.d file is no longer required
OLD_FILES+=etc/pam.d/gdm
# 20090714: net_add_domain(9) renamed to domain_add(9)
OLD_FILES+=usr/share/man/man9/net_add_domain.9.gz
# 20090713: vimage container structs removed
OLD_FILES+=usr/include/netinet/vinet.h
OLD_FILES+=usr/include/netinet6/vinet6.h
OLD_FILES+=usr/include/netipsec/vipsec.h
# 20090712: ieee80211.4 -> net80211.4
OLD_FILES+=usr/share/man/man4/ieee80211.4.gz
# 20090711: typo fixed, kproc_resume,.9 -> kproc_resume.9
OLD_FILES+=usr/share/man/man9/kproc_resume,.9.gz
# 20090709: msgctl.3 msgget.3 msgrcv.3 msgsnd.3 manual pages moved
OLD_FILES+=usr/share/man/man3/msgctl.3.gz
OLD_FILES+=usr/share/man/man3/msgget.3.gz
OLD_FILES+=usr/share/man/man3/msgrcv.3.gz
OLD_FILES+=usr/share/man/man3/msgsnd.3.gz
# 20090630: old kernel RPC implementation removal
OLD_FILES+=usr/include/nfs/rpcv2.h
# 20090624: update usbdi(9)
OLD_FILES+=usr/share/man/man9/usbd_abort_default_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_abort_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_alloc_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_alloc_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall_async.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_toggle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_close_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_device2interface_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_async.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_flags_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_endpoint_count.9.gz
OLD_FILES+=usr/share/man/man9/usbd_find_edesc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_find_idesc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_free_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_free_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_desc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_desc_full.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_device_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_endpoint_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_interface_altindex.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_interface_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_no_alts.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_quirks.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_speed.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_string.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_string_desc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_xfer_status.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface2device_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface2endpoint_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface_count.9.gz
OLD_FILES+=usr/share/man/man9/usbd_open_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_open_pipe_intr.9.gz
OLD_FILES+=usr/share/man/man9/usbd_pipe2device_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_config_index.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_config_no.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_interface.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_default_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_isoc_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_sync_transfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer.9.gz
OLD_FILES+=usr/share/man/man9/usb_find_desc.9.gz
# 20090623: number of headers needed for a usb driver reduced
OLD_FILES+=usr/include/dev/usb/usb_defs.h
OLD_FILES+=usr/include/dev/usb/usb_error.h
OLD_FILES+=usr/include/dev/usb/usb_handle_request.h
OLD_FILES+=usr/include/dev/usb/usb_hid.h
OLD_FILES+=usr/include/dev/usb/usb_lookup.h
OLD_FILES+=usr/include/dev/usb/usb_mfunc.h
OLD_FILES+=usr/include/dev/usb/usb_parse.h
OLD_FILES+=usr/include/dev/usb/usb_revision.h
# 20090609: devclass_add_driver is no longer public
OLD_FILES+=usr/share/man/man9/devclass_add_driver.9.gz
OLD_FILES+=usr/share/man/man9/devclass_delete_driver.9.gz
OLD_FILES+=usr/share/man/man9/devclass_find_driver.9.gz
# 20090605: removal of clists
OLD_FILES+=usr/include/sys/clist.h
# 20090602: removal of window(1)
OLD_FILES+=usr/bin/window
OLD_FILES+=usr/share/man/man1/window.1.gz
# 20090531: bind 9.6.1rc1 import
OLD_LIBS+=usr/lib/liblwres.so.30
# 20090530: removal of early.sh
OLD_FILES+=etc/rc.d/early.sh
# 20090527: renaming of S{LIST,TAILQ}_REMOVE_NEXT() to _REMOVE_AFTER()
OLD_FILES+=usr/share/man/man3/SLIST_REMOVE_NEXT.3.gz
OLD_FILES+=usr/share/man/man3/STAILQ_REMOVE_NEXT.3.gz
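# Note: only the spelling changed; a minimal sketch of the renamed
# queue(3) macros (same semantics as the old *_REMOVE_NEXT forms,
# with elm/entries standing in for a real element and field name):
#   SLIST_REMOVE_AFTER(elm, entries);
#   STAILQ_REMOVE_AFTER(&head, elm, entries);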
# 20090527: removal of legacy USB stack
OLD_FILES+=usr/include/legacy/dev/usb/dsbr100io.h
OLD_FILES+=usr/include/legacy/dev/usb/ehcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ehcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/hid.h
OLD_FILES+=usr/include/legacy/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/legacy/dev/usb/ohcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ohcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/legacy/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/legacy/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/legacy/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/legacy/dev/usb/ubser.h
OLD_FILES+=usr/include/legacy/dev/usb/ucomvar.h
OLD_FILES+=usr/include/legacy/dev/usb/udbp.h
OLD_FILES+=usr/include/legacy/dev/usb/uftdireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/legacy/dev/usb/uhcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/uhcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/usb.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_mem.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_port.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/legacy/dev/usb/usbcdc.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdi.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdi_util.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdivar.h
OLD_FILES+=usr/include/legacy/dev/usb/usbhid.h
OLD_FILES+=usr/include/legacy/dev/usb/uxb360gp_rdesc.h
OLD_DIRS+=usr/include/legacy/dev/usb
OLD_DIRS+=usr/include/legacy/dev
OLD_DIRS+=usr/include/legacy
# 20090526: removal of makekey(8)
OLD_FILES+=usr/libexec/makekey
OLD_FILES+=usr/share/man/man8/makekey.8.gz
# 20090522: removal of University of Michigan NFSv4 client
OLD_FILES+=etc/rc.d/idmapd
OLD_FILES+=sbin/idmapd
OLD_FILES+=sbin/mount_nfs4
OLD_FILES+=usr/share/man/man8/idmapd.8.gz
OLD_FILES+=usr/share/man/man8/mount_nfs4.8.gz
# 20090513: removal of legacy versions of USB network interface drivers
OLD_FILES+=usr/include/legacy/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_ethersubr.h
# 20090417: removal of legacy versions of USB network interface drivers
OLD_FILES+=usr/include/legacy/dev/usb/if_auereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_axereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_cdcereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_cuereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_kuereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_ruereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_rumreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_rumvar.h
OLD_FILES+=usr/include/legacy/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/legacy/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/legacy/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/legacy/dev/usb/kue_fw.h
# 20090416: removal of ar(4), ray(4), sr(4), raycontrol(8)
OLD_FILES+=usr/sbin/raycontrol
OLD_FILES+=usr/share/man/man4/i386/ar.4.gz
OLD_FILES+=usr/share/man/man4/i386/ray.4.gz
OLD_FILES+=usr/share/man/man4/i386/sr.4.gz
OLD_FILES+=usr/share/man/man8/raycontrol.8.gz
# 20090410: VOP_LEASE.9 removed
OLD_FILES+=usr/share/man/man9/VOP_LEASE.9.gz
# 20090406: usb_sw_transfer.h removed
OLD_FILES+=usr/include/dev/usb/usb_sw_transfer.h
# 20090405: removal of if_ppp(4) and if_sl(4)
OLD_FILES+=sbin/slattach rescue/slattach
OLD_FILES+=sbin/startslip rescue/startslip
OLD_FILES+=usr/include/net/if_ppp.h
OLD_FILES+=usr/include/net/if_pppvar.h
OLD_FILES+=usr/include/net/if_slvar.h
OLD_FILES+=usr/include/net/ppp_comp.h
OLD_FILES+=usr/include/net/slip.h
OLD_FILES+=usr/sbin/sliplogin
OLD_FILES+=usr/sbin/slstat
OLD_FILES+=usr/sbin/pppd
OLD_FILES+=usr/sbin/pppstats
OLD_FILES+=usr/share/man/man1/startslip.1.gz
OLD_FILES+=usr/share/man/man4/if_ppp.4.gz
OLD_FILES+=usr/share/man/man4/if_sl.4.gz
OLD_FILES+=usr/share/man/man4/ppp.4.gz
OLD_FILES+=usr/share/man/man4/sl.4.gz
OLD_FILES+=usr/share/man/man8/pppd.8.gz
OLD_FILES+=usr/share/man/man8/pppstats.8.gz
OLD_FILES+=usr/share/man/man8/slattach.8.gz
OLD_FILES+=usr/share/man/man8/slip.8.gz
OLD_FILES+=usr/share/man/man8/sliplogin.8.gz
OLD_FILES+=usr/share/man/man8/slstat.8.gz
# 20090321: libpcap upgraded to 1.0.0
OLD_LIBS+=lib/libpcap.so.5
# 20090319: uscanner(4) has been removed
OLD_FILES+=usr/share/man/man4/uscanner.4.gz
# 20090313: k8temp(4) renamed to amdtemp(4)
OLD_FILES+=usr/share/man/man4/k8temp.4.gz
# 20090308: libusb20 renamed to libusb
OLD_LIBS+=usr/lib/libusb20.so.1
OLD_FILES+=usr/lib/libusb20.a
OLD_FILES+=usr/lib/libusb20.so
OLD_FILES+=usr/lib/libusb20_p.a
OLD_FILES+=usr/include/libusb20_compat01.h
OLD_FILES+=usr/include/libusb20_compat10.h
# 20090226: libmp(3) functions renamed
OLD_LIBS+=usr/lib/libmp.so.6
# 20090223: changeover of USB stacks
OLD_FILES+=usr/include/dev/usb2/include/ufm2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/urio2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_cdc.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_defs.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_devid.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_devtable.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_endian.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_error.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_hid.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_mfunc.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_revision.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_standard.h
OLD_DIRS+=usr/include/dev/usb2/include
OLD_DIRS+=usr/include/dev/usb2
OLD_FILES+=usr/include/dev/usb/dsbr100io.h
OLD_FILES+=usr/include/dev/usb/ehcireg.h
OLD_FILES+=usr/include/dev/usb/ehcivar.h
OLD_FILES+=usr/include/dev/usb/hid.h
OLD_FILES+=usr/include/dev/usb/if_auereg.h
OLD_FILES+=usr/include/dev/usb/if_axereg.h
OLD_FILES+=usr/include/dev/usb/if_cdcereg.h
OLD_FILES+=usr/include/dev/usb/if_cuereg.h
OLD_FILES+=usr/include/dev/usb/if_kuereg.h
OLD_FILES+=usr/include/dev/usb/if_ruereg.h
OLD_FILES+=usr/include/dev/usb/if_rumreg.h
OLD_FILES+=usr/include/dev/usb/if_rumvar.h
OLD_FILES+=usr/include/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/dev/usb/kue_fw.h
OLD_FILES+=usr/include/dev/usb/ohcireg.h
OLD_FILES+=usr/include/dev/usb/ohcivar.h
OLD_FILES+=usr/include/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/dev/usb/ubser.h
OLD_FILES+=usr/include/dev/usb/ucomvar.h
OLD_FILES+=usr/include/dev/usb/udbp.h
OLD_FILES+=usr/include/dev/usb/uftdireg.h
OLD_FILES+=usr/include/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/dev/usb/uhcireg.h
OLD_FILES+=usr/include/dev/usb/uhcivar.h
OLD_FILES+=usr/include/dev/usb/usb_ethersubr.h
OLD_FILES+=usr/include/dev/usb/usb_mem.h
OLD_FILES+=usr/include/dev/usb/usb_port.h
OLD_FILES+=usr/include/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/dev/usb/usbcdc.h
OLD_FILES+=usr/include/dev/usb/usbdivar.h
OLD_FILES+=usr/include/dev/usb/uxb360gp_rdesc.h
OLD_FILES+=usr/sbin/usbdevs
OLD_FILES+=usr/share/man/man8/usbdevs.8.gz
# 20090203: removal of pccard header files
OLD_FILES+=usr/include/pccard/cardinfo.h
OLD_FILES+=usr/include/pccard/cis.h
OLD_DIRS+=usr/include/pccard
# 20090203: adding_user.8 moved to adding_user.7
OLD_FILES+=usr/share/man/man8/adding_user.8.gz
# 20090102: file 4.26 import
OLD_FILES+=usr/share/misc/magic.mime
OLD_FILES+=usr/share/misc/magic.mime.mgc
# 20081223: bind 9.4.3 import, nsupdate.8 moved to nsupdate.1
OLD_FILES+=usr/share/man/man8/nsupdate.8.gz
# 20081223: ipprotosw.h removed
OLD_FILES+=usr/include/netinet/ipprotosw.h
# 20081123: vfs_mountedon.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mountedon.9.gz
# 20081023: FREE.9 and MALLOC.9 removed
OLD_FILES+=usr/share/man/man9/FREE.9.gz
OLD_FILES+=usr/share/man/man9/MALLOC.9.gz
# 20080928: removal of inaccurate device_ids(9) manual page
OLD_FILES+=usr/share/man/man9/device_ids.9.gz
OLD_FILES+=usr/share/man/man9/major.9.gz
OLD_FILES+=usr/share/man/man9/minor.9.gz
OLD_FILES+=usr/share/man/man9/umajor.9.gz
OLD_FILES+=usr/share/man/man9/uminor.9.gz
# 20080917: removal of manpage for axed kernel primitive suser(9)
OLD_FILES+=usr/share/man/man9/suser.9.gz
OLD_FILES+=usr/share/man/man9/suser_cred.9.gz
# 20080913: pax removed from rescue
OLD_FILES+=rescue/pax
# 20080823: removal of pt_chown, no longer needed to implement grantpt(3)
OLD_FILES+=usr/libexec/pt_chown
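# Note: grantpt(3) now works without a setuid helper; the usual pty
# open sequence is unchanged (a minimal C sketch):
#   int mfd = posix_openpt(O_RDWR);
#   grantpt(mfd);
#   unlockpt(mfd);
#   const char *slave = ptsname(mfd);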
# 20080822: ntp 4.2.4p5 import
OLD_FILES+=usr/share/doc/ntp/driver23.html
OLD_FILES+=usr/share/doc/ntp/driver24.html
# 20080821: several man pages moved from man4.i386 to man4
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/acpi_aiboost.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_asus.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_fujitsu.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_ibm.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_panasonic.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_sony.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_toshiba.4.gz
OLD_FILES+=usr/share/man/man4/i386/ichwd.4.gz
OLD_FILES+=usr/share/man/man4/i386/if_ndis.4.gz
OLD_FILES+=usr/share/man/man4/i386/io.4.gz
OLD_FILES+=usr/share/man/man4/i386/linux.4.gz
OLD_FILES+=usr/share/man/man4/i386/ndis.4.gz
.endif
# 20080820: MPSAFE TTY layer integrated
OLD_FILES+=usr/include/sys/linedisc.h
OLD_FILES+=usr/share/man/man3/posix_openpt.3.gz
# 20080725: sgtty.h removed
OLD_FILES+=usr/include/sgtty.h
# 20080706: bsdlabel(8) removed on powerpc
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=sbin/bsdlabel
OLD_FILES+=usr/share/man/man8/bsdlabel.8.gz
.endif
# 20080704: sbsh(4) removed
OLD_FILES+=usr/share/man/man4/if_sbsh.4.gz
OLD_FILES+=usr/share/man/man4/sbsh.4.gz
# 20080704: cnw(4) removed
OLD_FILES+=usr/share/man/man4/if_cnw.4.gz
OLD_FILES+=usr/share/man/man4/cnw.4.gz
# 20080704: oltr(4) removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/if_oltr.4.gz
OLD_FILES+=usr/share/man/man4/i386/oltr.4.gz
.endif
# 20080704: arl(4) removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/sbin/arlcontrol
OLD_FILES+=usr/share/man/man4/i386/arl.4.gz
OLD_FILES+=usr/share/man/man8/arlcontrol.8.gz
.endif
# 20080703: sunlabel now built only for sparc64
.if ${TARGET_ARCH} != "sparc64"
OLD_FILES+=sbin/sunlabel
OLD_FILES+=usr/share/man/man8/sunlabel.8.gz
.endif
# 20080701: wpa_supplicant.conf moved to share/examples/etc/
OLD_FILES+=usr/share/examples/wpa_supplicant/wpa_supplicant.conf
OLD_DIRS+=usr/share/examples/wpa_supplicant
# 20080614: pecoff image activator removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/pecoff_machdep.h
.endif
# 20080614: sgtty removed
OLD_FILES+=usr/include/sys/ttychars.h
OLD_FILES+=usr/include/sys/ttydev.h
OLD_FILES+=usr/share/man/man3/gtty.3.gz
OLD_FILES+=usr/share/man/man3/stty.3.gz
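# Note: termios is the replacement for the removed sgtty interfaces;
# a minimal C sketch of what gtty(3)/stty(3) used to do:
#   struct termios t;
#   tcgetattr(STDIN_FILENO, &t);
#   t.c_lflag &= ~ECHO;
#   tcsetattr(STDIN_FILENO, TCSANOW, &t);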
# 20080609: gpt(8) removed
OLD_FILES+=sbin/gpt
OLD_FILES+=usr/share/man/man8/gpt.8.gz
# 20080525: I4B removed
OLD_FILES+=etc/isdn/answer
OLD_FILES+=etc/isdn/isdntel
OLD_FILES+=etc/isdn/record
OLD_FILES+=etc/isdn/tell
OLD_FILES+=etc/isdn/tell-record
OLD_FILES+=etc/isdn/unknown_incoming
OLD_FILES+=etc/isdn/holidays.D
OLD_FILES+=etc/isdn/isdnd.rates.A
OLD_FILES+=etc/isdn/isdnd.rates.D
OLD_FILES+=etc/isdn/isdnd.rates.F
OLD_FILES+=etc/isdn/isdnd.rates.L
OLD_FILES+=etc/isdn/isdnd.rates.UK.BT
OLD_FILES+=etc/isdn/isdnd.rc.sample
OLD_FILES+=etc/isdn/isdntel.alias.sample
OLD_DIRS+=etc/isdn
OLD_FILES+=etc/rc.d/isdnd
OLD_FILES+=usr/include/i4b/i4b_cause.h
OLD_FILES+=usr/include/i4b/i4b_debug.h
OLD_FILES+=usr/include/i4b/i4b_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_rbch_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_tel_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_trace.h
OLD_DIRS+=usr/include/i4b
OLD_FILES+=usr/sbin/dtmfdecode
OLD_FILES+=usr/sbin/g711conv
OLD_FILES+=usr/sbin/isdnd
OLD_FILES+=usr/sbin/isdndebug
OLD_FILES+=usr/sbin/isdndecode
OLD_FILES+=usr/sbin/isdnmonitor
OLD_FILES+=usr/sbin/isdnphone
OLD_FILES+=usr/sbin/isdntel
OLD_FILES+=usr/sbin/isdntelctl
OLD_FILES+=usr/sbin/isdntrace
OLD_FILES+=usr/share/isdn/0.al
OLD_FILES+=usr/share/isdn/1.al
OLD_FILES+=usr/share/isdn/2.al
OLD_FILES+=usr/share/isdn/3.al
OLD_FILES+=usr/share/isdn/4.al
OLD_FILES+=usr/share/isdn/5.al
OLD_FILES+=usr/share/isdn/6.al
OLD_FILES+=usr/share/isdn/7.al
OLD_FILES+=usr/share/isdn/8.al
OLD_FILES+=usr/share/isdn/9.al
OLD_FILES+=usr/share/isdn/beep.al
OLD_FILES+=usr/share/isdn/msg.al
OLD_DIRS+=usr/share/isdn
OLD_FILES+=usr/share/man/man1/dtmfdecode.1.gz
OLD_FILES+=usr/share/man/man1/g711conv.1.gz
OLD_FILES+=usr/share/man/man4/i4b.4.gz
OLD_FILES+=usr/share/man/man4/i4bcapi.4.gz
OLD_FILES+=usr/share/man/man4/i4bctl.4.gz
OLD_FILES+=usr/share/man/man4/i4bing.4.gz
OLD_FILES+=usr/share/man/man4/i4bipr.4.gz
OLD_FILES+=usr/share/man/man4/i4bisppp.4.gz
OLD_FILES+=usr/share/man/man4/i4bq921.4.gz
OLD_FILES+=usr/share/man/man4/i4bq931.4.gz
OLD_FILES+=usr/share/man/man4/i4brbch.4.gz
OLD_FILES+=usr/share/man/man4/i4btel.4.gz
OLD_FILES+=usr/share/man/man4/i4btrc.4.gz
OLD_FILES+=usr/share/man/man4/iavc.4.gz
OLD_FILES+=usr/share/man/man4/isic.4.gz
OLD_FILES+=usr/share/man/man4/ifpi.4.gz
OLD_FILES+=usr/share/man/man4/ifpi2.4.gz
OLD_FILES+=usr/share/man/man4/ifpnp.4.gz
OLD_FILES+=usr/share/man/man4/ihfc.4.gz
OLD_FILES+=usr/share/man/man4/itjc.4.gz
OLD_FILES+=usr/share/man/man4/iwic.4.gz
OLD_FILES+=usr/share/man/man5/isdnd.rc.5.gz
OLD_FILES+=usr/share/man/man5/isdnd.rates.5.gz
OLD_FILES+=usr/share/man/man5/isdnd.acct.5.gz
OLD_FILES+=usr/share/man/man8/isdnd.8.gz
OLD_FILES+=usr/share/man/man8/isdndebug.8.gz
OLD_FILES+=usr/share/man/man8/isdndecode.8.gz
OLD_FILES+=usr/share/man/man8/isdnmonitor.8.gz
OLD_FILES+=usr/share/man/man8/isdnphone.8.gz
OLD_FILES+=usr/share/man/man8/isdntel.8.gz
OLD_FILES+=usr/share/man/man8/isdntelctl.8.gz
OLD_FILES+=usr/share/man/man8/isdntrace.8.gz
OLD_FILES+=usr/share/examples/isdn/contrib/README
OLD_FILES+=usr/share/examples/isdn/contrib/anleitung.ppp
OLD_FILES+=usr/share/examples/isdn/contrib/answer.c
OLD_FILES+=usr/share/examples/isdn/contrib/answer.sh
OLD_FILES+=usr/share/examples/isdn/contrib/convert.sh
OLD_FILES+=usr/share/examples/isdn/contrib/hplay.c
OLD_FILES+=usr/share/examples/isdn/contrib/i4b-ppp-newbie.txt
OLD_FILES+=usr/share/examples/isdn/contrib/isdnctl
OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct
OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct.pl
OLD_FILES+=usr/share/examples/isdn/contrib/isdntelmux.c
OLD_FILES+=usr/share/examples/isdn/contrib/mrtg-isp0.sh
OLD_FILES+=usr/share/examples/isdn/i4brunppp/Makefile
OLD_FILES+=usr/share/examples/isdn/i4brunppp/README
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp-isdnd.rc
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.8
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.c
OLD_FILES+=usr/share/examples/isdn/v21/Makefile
OLD_FILES+=usr/share/examples/isdn/v21/README
OLD_FILES+=usr/share/examples/isdn/v21/v21modem.c
OLD_FILES+=usr/share/examples/isdn/FAQ
OLD_FILES+=usr/share/examples/isdn/KERNEL
OLD_FILES+=usr/share/examples/isdn/Overview
OLD_FILES+=usr/share/examples/isdn/README
OLD_FILES+=usr/share/examples/isdn/ROADMAP
OLD_FILES+=usr/share/examples/isdn/ReleaseNotes
OLD_FILES+=usr/share/examples/isdn/Resources
OLD_FILES+=usr/share/examples/isdn/SupportedCards
OLD_FILES+=usr/share/examples/isdn/ThankYou
OLD_DIRS+=usr/share/examples/isdn/contrib
OLD_DIRS+=usr/share/examples/isdn/i4brunppp
OLD_DIRS+=usr/share/examples/isdn/v21
OLD_DIRS+=usr/share/examples/isdn
OLD_FILES+=usr/share/examples/ppp/isdnd.rc
OLD_FILES+=usr/share/examples/ppp/ppp.conf.isdn
# 20080525: ng_atmpif removed
OLD_FILES+=usr/include/netgraph/atm/ng_atmpif.h
OLD_FILES+=usr/share/man/man4/ng_atmpif.4.gz
# 20080522: pmap_addr_hint removed
OLD_FILES+=usr/share/man/man9/pmap_addr_hint.9.gz
# 20080517: ipsec_osdep.h removed
OLD_FILES+=usr/include/netipsec/ipsec_osdep.h
# 20080507: heimdal 1.1 import
OLD_LIBS+=usr/lib/libasn1.so.9
OLD_LIBS+=usr/lib/libgssapi.so.9
OLD_LIBS+=usr/lib/libgssapi_krb5.so.9
OLD_LIBS+=usr/lib/libhdb.so.9
OLD_LIBS+=usr/lib/libkadm5clnt.so.9
OLD_LIBS+=usr/lib/libkadm5srv.so.9
OLD_LIBS+=usr/lib/libkafs5.so.9
OLD_LIBS+=usr/lib/libkrb5.so.9
OLD_LIBS+=usr/lib/libroken.so.9
# 20080420: Symbol card support dropped
OLD_FILES+=usr/include/dev/wi/spectrum24t_cf.h
# 20080420: awi removal
OLD_FILES+=usr/share/man/man4/awi.4.gz
OLD_FILES+=usr/share/man/man4/if_awi.4.gz
# 20080331: pkg_sign and pkg_check have been removed
OLD_FILES+=usr/sbin/pkg_check
OLD_FILES+=usr/sbin/pkg_sign
OLD_FILES+=usr/share/man/man1/pkg_check.1.gz
OLD_FILES+=usr/share/man/man1/pkg_sign.1.gz
# 20080314: stack_print(9) mlink fixed
OLD_FILES+=usr/share/man/man9/stack_printf.9.gz
# 20080312: libkse removal
OLD_FILES+=usr/include/sys/kse.h
OLD_FILES+=usr/lib/libkse.so
OLD_LIBS+=usr/lib/libkse.so.3
OLD_FILES+=usr/share/man/man2/kse.2.gz
OLD_FILES+=usr/share/man/man2/kse_create.2.gz
OLD_FILES+=usr/share/man/man2/kse_exit.2.gz
OLD_FILES+=usr/share/man/man2/kse_release.2.gz
OLD_FILES+=usr/share/man/man2/kse_switchin.2.gz
OLD_FILES+=usr/share/man/man2/kse_thr_interrupt.2.gz
OLD_FILES+=usr/share/man/man2/kse_wakeup.2.gz
# 20080225: bsdar/bsdranlib renamed to ar/ranlib
OLD_FILES+=usr/bin/bsdar
OLD_FILES+=usr/bin/bsdranlib
OLD_FILES+=usr/share/man/man1/bsdar.1.gz
OLD_FILES+=usr/share/man/man1/bsdranlib.1.gz
# 20080220: geom_lvm renamed to geom_linux_lvm
OLD_FILES+=usr/share/man/man4/geom_lvm.4.gz
# 20080126: oldcard.4 removal
OLD_FILES+=usr/share/man/man4/card.4.gz
OLD_FILES+=usr/share/man/man4/oldcard.4.gz
# 20080122: BUF_REFCNT(9) removed from the tree
OLD_FILES+=usr/share/man/man9/BUF_REFCNT.9.gz
# 20080108: shm_open(3) and shm_unlink(3) moved to section 2
OLD_FILES+=usr/share/man/man3/shm_open.3.gz
OLD_FILES+=usr/share/man/man3/shm_unlink.3.gz
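# Note: shm_open(2)/shm_unlink(2) are now documented as system calls; a
# minimal C sketch of POSIX shared memory (assuming <sys/mman.h> and
# <fcntl.h>; "/myshm" is a placeholder name):
#   int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
#   ftruncate(fd, 4096);
#   void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
#   shm_unlink("/myshm");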
# 20071207: Merged with fortunes-o.real
OLD_FILES+=usr/share/games/fortune/fortunes2-o
OLD_FILES+=usr/share/games/fortune/fortunes2-o.dat
# 20071201: Removal of XRPU driver
OLD_FILES+=usr/include/sys/xrpuio.h
# 20071129: Disabled static versions of libkse by default
OLD_FILES+=usr/lib/libkse.a
OLD_FILES+=usr/lib/libkse_p.a
OLD_FILES+=usr/lib/libkse_pic.a
# 20071129: Removed a Solaris compatibility header
OLD_FILES+=usr/include/sys/_elf_solaris.h
# 20071125: Renamed to pmc_get_msr()
OLD_FILES+=usr/share/man/man3/pmc_x86_get_msr.3.gz
# 20071108: Removed crufty OLDCARD support file
OLD_FILES+=etc/defaults/pccard.conf
# 20071025: rc.d/nfslocking superseded by rc.d/lockd and rc.d/statd
OLD_FILES+=etc/rc.d/nfslocking
# 20070930: rename of cached to nscd
OLD_FILES+=etc/cached.conf
OLD_FILES+=etc/rc.d/cached
OLD_FILES+=usr/sbin/cached
OLD_FILES+=usr/share/man/man5/cached.conf.5.gz
OLD_FILES+=usr/share/man/man8/cached.8.gz
# 20070807: removal of PowerPC-specific header file
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/interruptvar.h
.endif
# 20070801: fast_ipsec.4 gone
OLD_FILES+=usr/share/man/man4/fast_ipsec.4.gz
# 20070715: netatm temporarily disconnected (removed 20080525)
OLD_FILES+=rescue/atm
OLD_FILES+=rescue/fore_dnld
OLD_FILES+=rescue/ilmid
OLD_FILES+=sbin/atm
OLD_FILES+=sbin/fore_dnld
OLD_FILES+=sbin/ilmid
OLD_FILES+=usr/include/libatm.h
OLD_FILES+=usr/include/netatm/atm.h
OLD_FILES+=usr/include/netatm/atm_cm.h
OLD_FILES+=usr/include/netatm/atm_if.h
OLD_FILES+=usr/include/netatm/atm_ioctl.h
OLD_FILES+=usr/include/netatm/atm_pcb.h
OLD_FILES+=usr/include/netatm/atm_sap.h
OLD_FILES+=usr/include/netatm/atm_sigmgr.h
OLD_FILES+=usr/include/netatm/atm_stack.h
OLD_FILES+=usr/include/netatm/atm_sys.h
OLD_FILES+=usr/include/netatm/atm_var.h
OLD_FILES+=usr/include/netatm/atm_vc.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm_serv.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm_var.h
OLD_FILES+=usr/include/netatm/port.h
OLD_FILES+=usr/include/netatm/queue.h
OLD_FILES+=usr/include/netatm/sigpvc/sigpvc_var.h
OLD_FILES+=usr/include/netatm/spans/spans_cls.h
OLD_FILES+=usr/include/netatm/spans/spans_kxdr.h
OLD_FILES+=usr/include/netatm/spans/spans_var.h
OLD_FILES+=usr/include/netatm/uni/sscf_uni.h
OLD_FILES+=usr/include/netatm/uni/sscf_uni_var.h
OLD_FILES+=usr/include/netatm/uni/sscop.h
OLD_FILES+=usr/include/netatm/uni/sscop_misc.h
OLD_FILES+=usr/include/netatm/uni/sscop_pdu.h
OLD_FILES+=usr/include/netatm/uni/sscop_var.h
OLD_FILES+=usr/include/netatm/uni/uni.h
OLD_FILES+=usr/include/netatm/uni/uniip_var.h
OLD_FILES+=usr/include/netatm/uni/unisig.h
OLD_FILES+=usr/include/netatm/uni/unisig_decode.h
OLD_FILES+=usr/include/netatm/uni/unisig_mbuf.h
OLD_FILES+=usr/include/netatm/uni/unisig_msg.h
OLD_FILES+=usr/include/netatm/uni/unisig_print.h
OLD_FILES+=usr/include/netatm/uni/unisig_var.h
OLD_FILES+=usr/lib/libatm.a
OLD_FILES+=usr/lib/libatm_p.a
OLD_FILES+=usr/sbin/atmarpd
OLD_FILES+=usr/sbin/scspd
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atm.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atmarpd.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/fore_dnld.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/ilmid.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/scspd.8.gz
OLD_FILES+=usr/share/man/man8/atm.8.gz
OLD_FILES+=usr/share/man/man8/atmarpd.8.gz
OLD_FILES+=usr/share/man/man8/fore_dnld.8.gz
OLD_FILES+=usr/share/man/man8/ilmid.8.gz
OLD_FILES+=usr/share/man/man8/scspd.8.gz
OLD_FILES+=usr/share/examples/atm/NOTES
OLD_FILES+=usr/share/examples/atm/README
OLD_FILES+=usr/share/examples/atm/Startup
OLD_FILES+=usr/share/examples/atm/atm-config.sh
OLD_FILES+=usr/share/examples/atm/atm-sockets.txt
OLD_FILES+=usr/share/examples/atm/cpcs-design.txt
OLD_FILES+=usr/share/examples/atm/fore-microcode.txt
OLD_FILES+=usr/share/examples/atm/sscf-design.txt
OLD_FILES+=usr/share/examples/atm/sscop-design.txt
OLD_LIBS+=lib/libatm.so.5
OLD_FILES+=usr/lib/libatm.so
OLD_DIRS+=usr/include/netatm/sigpvc
OLD_DIRS+=usr/include/netatm/spans
OLD_DIRS+=usr/include/netatm/ipatm
OLD_DIRS+=usr/include/netatm/uni
OLD_DIRS+=usr/include/netatm
OLD_DIRS+=usr/share/examples/atm
# 20070705: I4B headers repo-copied to include/i4b/
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/i4b_cause.h
OLD_FILES+=usr/include/machine/i4b_debug.h
OLD_FILES+=usr/include/machine/i4b_ioctl.h
OLD_FILES+=usr/include/machine/i4b_rbch_ioctl.h
OLD_FILES+=usr/include/machine/i4b_tel_ioctl.h
OLD_FILES+=usr/include/machine/i4b_trace.h
.endif
# 20070703: pf 4.1 import
OLD_FILES+=usr/libexec/ftp-proxy
# 20070701: KAME IPSec removal
OLD_FILES+=usr/include/netinet6/ah.h
OLD_FILES+=usr/include/netinet6/ah6.h
OLD_FILES+=usr/include/netinet6/ah_aesxcbcmac.h
OLD_FILES+=usr/include/netinet6/esp.h
OLD_FILES+=usr/include/netinet6/esp6.h
OLD_FILES+=usr/include/netinet6/esp_aesctr.h
OLD_FILES+=usr/include/netinet6/esp_camellia.h
OLD_FILES+=usr/include/netinet6/esp_rijndael.h
OLD_FILES+=usr/include/netinet6/ipsec.h
OLD_FILES+=usr/include/netinet6/ipsec6.h
OLD_FILES+=usr/include/netinet6/ipcomp.h
OLD_FILES+=usr/include/netinet6/ipcomp6.h
OLD_FILES+=usr/include/netkey/key.h
OLD_FILES+=usr/include/netkey/key_debug.h
OLD_FILES+=usr/include/netkey/key_var.h
OLD_FILES+=usr/include/netkey/keydb.h
OLD_FILES+=usr/include/netkey/keysock.h
OLD_DIRS+=usr/include/netkey
# 20070701: remove wicontrol
OLD_FILES+=usr/sbin/wicontrol
OLD_FILES+=usr/share/man/man8/wicontrol.8.gz
# 20070625: umapfs removal
OLD_FILES+=rescue/mount_umapfs
OLD_FILES+=sbin/mount_umapfs
OLD_FILES+=usr/include/fs/umapfs/umap.h
OLD_FILES+=usr/share/man/man8/mount_umapfs.8.gz
OLD_DIRS+=usr/include/fs/umapfs
# 20070618: Removal of the PROTO.localhost* files
OLD_FILES+=etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=etc/namedb/PROTO.localhost.rev
OLD_FILES+=etc/namedb/make-localhost
# 20070618: shared library version bump
OLD_LIBS+=lib/libalias.so.5
OLD_LIBS+=lib/libbsnmp.so.3
OLD_LIBS+=lib/libncurses.so.6
OLD_LIBS+=lib/libncursesw.so.6
OLD_LIBS+=lib/libreadline.so.6
OLD_LIBS+=usr/lib/libdialog.so.5
OLD_LIBS+=usr/lib/libgnuregex.so.3
OLD_LIBS+=usr/lib/libhistory.so.6
OLD_LIBS+=usr/lib/libpam.so.3
OLD_LIBS+=usr/lib/libssh.so.3
OLD_LIBS+=usr/lib/pam_chroot.so.3
OLD_LIBS+=usr/lib/pam_deny.so.3
OLD_LIBS+=usr/lib/pam_echo.so.3
OLD_LIBS+=usr/lib/pam_exec.so.3
OLD_LIBS+=usr/lib/pam_ftpusers.so.3
OLD_LIBS+=usr/lib/pam_group.so.3
OLD_LIBS+=usr/lib/pam_guest.so.3
OLD_LIBS+=usr/lib/pam_krb5.so.3
OLD_LIBS+=usr/lib/pam_ksu.so.3
OLD_LIBS+=usr/lib/pam_lastlog.so.3
OLD_LIBS+=usr/lib/pam_login_access.so.3
OLD_LIBS+=usr/lib/pam_nologin.so.3
OLD_LIBS+=usr/lib/pam_opie.so.3
OLD_LIBS+=usr/lib/pam_opieaccess.so.3
OLD_LIBS+=usr/lib/pam_passwdqc.so.3
OLD_LIBS+=usr/lib/pam_permit.so.3
OLD_LIBS+=usr/lib/pam_radius.so.3
OLD_LIBS+=usr/lib/pam_rhosts.so.3
OLD_LIBS+=usr/lib/pam_rootok.so.3
OLD_LIBS+=usr/lib/pam_securetty.so.3
OLD_LIBS+=usr/lib/pam_self.so.3
OLD_LIBS+=usr/lib/pam_ssh.so.3
OLD_LIBS+=usr/lib/pam_tacplus.so.3
OLD_LIBS+=usr/lib/pam_unix.so.3
OLD_LIBS+=usr/lib/snmp_atm.so.4
OLD_LIBS+=usr/lib/snmp_bridge.so.4
OLD_LIBS+=usr/lib/snmp_hostres.so.4
OLD_LIBS+=usr/lib/snmp_mibII.so.4
OLD_LIBS+=usr/lib/snmp_netgraph.so.4
OLD_LIBS+=usr/lib/snmp_pf.so.4
# 20070613: IPX over IP tunnel removal
OLD_FILES+=usr/include/netipx/ipx_ip.h
# 20070605: sched_core removal
OLD_FILES+=usr/share/man/man4/sched_core.4.gz
# 20070603: BIND 9.4.1 import
OLD_LIBS+=usr/lib/liblwres.so.10
# 20070521: shared library version bump
OLD_LIBS+=lib/libatm.so.4
OLD_LIBS+=lib/libbegemot.so.2
OLD_LIBS+=lib/libbsdxml.so.2
OLD_LIBS+=lib/libcam.so.3
OLD_LIBS+=lib/libcrypt.so.3
OLD_LIBS+=lib/libdevstat.so.5
OLD_LIBS+=lib/libedit.so.5
OLD_LIBS+=lib/libgeom.so.3
OLD_LIBS+=lib/libipsec.so.2
OLD_LIBS+=lib/libipx.so.3
OLD_LIBS+=lib/libkiconv.so.2
OLD_LIBS+=lib/libkse.so.2
OLD_LIBS+=lib/libkvm.so.3
OLD_LIBS+=lib/libm.so.4
OLD_LIBS+=lib/libmd.so.3
OLD_LIBS+=lib/libpcap.so.4
OLD_LIBS+=lib/libpthread.so.2
OLD_LIBS+=lib/libsbuf.so.3
OLD_LIBS+=lib/libthr.so.2
OLD_LIBS+=lib/libufs.so.3
OLD_LIBS+=lib/libutil.so.6
OLD_LIBS+=lib/libz.so.3
OLD_LIBS+=usr/lib/libbluetooth.so.2
OLD_LIBS+=usr/lib/libbsm.so.1
OLD_LIBS+=usr/lib/libbz2.so.2
OLD_LIBS+=usr/lib/libcalendar.so.3
OLD_LIBS+=usr/lib/libcom_err.so.3
OLD_LIBS+=usr/lib/libdevinfo.so.3
OLD_LIBS+=usr/lib/libfetch.so.4
OLD_LIBS+=usr/lib/libform.so.3
OLD_LIBS+=usr/lib/libformw.so.3
OLD_LIBS+=usr/lib/libftpio.so.6
OLD_LIBS+=usr/lib/libgpib.so.1
OLD_LIBS+=usr/lib/libkse.so.2
OLD_LIBS+=usr/lib/libmagic.so.2
OLD_LIBS+=usr/lib/libmemstat.so.1
OLD_LIBS+=usr/lib/libmenu.so.3
OLD_LIBS+=usr/lib/libmenuw.so.3
OLD_LIBS+=usr/lib/libmilter.so.3
OLD_LIBS+=usr/lib/libmp.so.5
OLD_LIBS+=usr/lib/libncp.so.2
OLD_LIBS+=usr/lib/libnetgraph.so.2
OLD_LIBS+=usr/lib/libngatm.so.2
OLD_LIBS+=usr/lib/libopie.so.4
OLD_LIBS+=usr/lib/libpanel.so.3
OLD_LIBS+=usr/lib/libpanelw.so.3
OLD_LIBS+=usr/lib/libpmc.so.3
OLD_LIBS+=usr/lib/libradius.so.2
OLD_LIBS+=usr/lib/librpcsvc.so.3
OLD_LIBS+=usr/lib/libsdp.so.2
OLD_LIBS+=usr/lib/libsmb.so.2
OLD_LIBS+=usr/lib/libstdc++.so.5
OLD_LIBS+=usr/lib/libtacplus.so.2
OLD_LIBS+=usr/lib/libthr.so.2
OLD_LIBS+=usr/lib/libthread_db.so.2
OLD_LIBS+=usr/lib/libugidfw.so.2
OLD_LIBS+=usr/lib/libusbhid.so.2
OLD_LIBS+=usr/lib/libvgl.so.4
OLD_LIBS+=usr/lib/libwrap.so.4
OLD_LIBS+=usr/lib/libypclnt.so.2
OLD_LIBS+=usr/lib/snmp_bridge.so.3
OLD_LIBS+=usr/lib/snmp_hostres.so.3
# 20070519: GCC 4.2
OLD_FILES+=usr/bin/f77
OLD_FILES+=usr/bin/protoize
OLD_FILES+=usr/include/g2c.h
OLD_FILES+=usr/libexec/f771
OLD_FILES+=usr/share/info/g77.info.gz
OLD_FILES+=usr/share/man/man1/f77.1.gz
OLD_FILES+=usr/include/c++/3.4/algorithm
OLD_FILES+=usr/include/c++/3.4/backward/algo.h
OLD_FILES+=usr/include/c++/3.4/backward/algobase.h
OLD_FILES+=usr/include/c++/3.4/backward/alloc.h
OLD_FILES+=usr/include/c++/3.4/backward/backward_warning.h
OLD_FILES+=usr/include/c++/3.4/backward/bvector.h
OLD_FILES+=usr/include/c++/3.4/backward/complex.h
OLD_FILES+=usr/include/c++/3.4/backward/defalloc.h
OLD_FILES+=usr/include/c++/3.4/backward/deque.h
OLD_FILES+=usr/include/c++/3.4/backward/fstream.h
OLD_FILES+=usr/include/c++/3.4/backward/function.h
OLD_FILES+=usr/include/c++/3.4/backward/hash_map.h
OLD_FILES+=usr/include/c++/3.4/backward/hash_set.h
OLD_FILES+=usr/include/c++/3.4/backward/hashtable.h
OLD_FILES+=usr/include/c++/3.4/backward/heap.h
OLD_FILES+=usr/include/c++/3.4/backward/iomanip.h
OLD_FILES+=usr/include/c++/3.4/backward/iostream.h
OLD_FILES+=usr/include/c++/3.4/backward/istream.h
OLD_FILES+=usr/include/c++/3.4/backward/iterator.h
OLD_FILES+=usr/include/c++/3.4/backward/list.h
OLD_FILES+=usr/include/c++/3.4/backward/map.h
OLD_FILES+=usr/include/c++/3.4/backward/multimap.h
OLD_FILES+=usr/include/c++/3.4/backward/multiset.h
OLD_FILES+=usr/include/c++/3.4/backward/new.h
OLD_FILES+=usr/include/c++/3.4/backward/ostream.h
OLD_FILES+=usr/include/c++/3.4/backward/pair.h
OLD_FILES+=usr/include/c++/3.4/backward/queue.h
OLD_FILES+=usr/include/c++/3.4/backward/rope.h
OLD_FILES+=usr/include/c++/3.4/backward/set.h
OLD_FILES+=usr/include/c++/3.4/backward/slist.h
OLD_FILES+=usr/include/c++/3.4/backward/stack.h
OLD_FILES+=usr/include/c++/3.4/backward/stream.h
OLD_FILES+=usr/include/c++/3.4/backward/streambuf.h
OLD_FILES+=usr/include/c++/3.4/backward/strstream
OLD_FILES+=usr/include/c++/3.4/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.4/backward/tree.h
OLD_FILES+=usr/include/c++/3.4/backward/vector.h
OLD_FILES+=usr/include/c++/3.4/bits/allocator.h
OLD_FILES+=usr/include/c++/3.4/bits/atomic_word.h
OLD_FILES+=usr/include/c++/3.4/bits/atomicity.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_file.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/3.4/bits/basic_string.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/3.4/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/3.4/bits/c++allocator.h
OLD_FILES+=usr/include/c++/3.4/bits/c++config.h
OLD_FILES+=usr/include/c++/3.4/bits/c++io.h
OLD_FILES+=usr/include/c++/3.4/bits/c++locale.h
OLD_FILES+=usr/include/c++/3.4/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/3.4/bits/char_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/cmath.tcc
OLD_FILES+=usr/include/c++/3.4/bits/codecvt.h
OLD_FILES+=usr/include/c++/3.4/bits/codecvt_specializations.h
OLD_FILES+=usr/include/c++/3.4/bits/concept_check.h
OLD_FILES+=usr/include/c++/3.4/bits/concurrence.h
OLD_FILES+=usr/include/c++/3.4/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_base.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/3.4/bits/deque.tcc
OLD_FILES+=usr/include/c++/3.4/bits/fstream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/functexcept.h
OLD_FILES+=usr/include/c++/3.4/bits/gslice.h
OLD_FILES+=usr/include/c++/3.4/bits/gslice_array.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-default.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-single.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr.h
OLD_FILES+=usr/include/c++/3.4/bits/indirect_array.h
OLD_FILES+=usr/include/c++/3.4/bits/ios_base.h
OLD_FILES+=usr/include/c++/3.4/bits/istream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/list.tcc
OLD_FILES+=usr/include/c++/3.4/bits/locale_classes.h
OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.h
OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/3.4/bits/localefwd.h
OLD_FILES+=usr/include/c++/3.4/bits/mask_array.h
OLD_FILES+=usr/include/c++/3.4/bits/messages_members.h
OLD_FILES+=usr/include/c++/3.4/bits/os_defines.h
OLD_FILES+=usr/include/c++/3.4/bits/ostream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/postypes.h
OLD_FILES+=usr/include/c++/3.4/bits/slice_array.h
OLD_FILES+=usr/include/c++/3.4/bits/sstream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/stl_algo.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_construct.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_deque.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_function.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_heap.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_list.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_map.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_pair.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_queue.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_relops.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.4/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.4/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.4/bits/time_members.h
OLD_FILES+=usr/include/c++/3.4/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_after.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.4/bits/valarray_before.h
OLD_FILES+=usr/include/c++/3.4/bits/vector.tcc
OLD_FILES+=usr/include/c++/3.4/bitset
OLD_FILES+=usr/include/c++/3.4/cassert
OLD_FILES+=usr/include/c++/3.4/cctype
OLD_FILES+=usr/include/c++/3.4/cerrno
OLD_FILES+=usr/include/c++/3.4/cfloat
OLD_FILES+=usr/include/c++/3.4/ciso646
OLD_FILES+=usr/include/c++/3.4/climits
OLD_FILES+=usr/include/c++/3.4/clocale
OLD_FILES+=usr/include/c++/3.4/cmath
OLD_FILES+=usr/include/c++/3.4/complex
OLD_FILES+=usr/include/c++/3.4/csetjmp
OLD_FILES+=usr/include/c++/3.4/csignal
OLD_FILES+=usr/include/c++/3.4/cstdarg
OLD_FILES+=usr/include/c++/3.4/cstddef
OLD_FILES+=usr/include/c++/3.4/cstdio
OLD_FILES+=usr/include/c++/3.4/cstdlib
OLD_FILES+=usr/include/c++/3.4/cstring
OLD_FILES+=usr/include/c++/3.4/ctime
OLD_FILES+=usr/include/c++/3.4/cwchar
OLD_FILES+=usr/include/c++/3.4/cwctype
OLD_FILES+=usr/include/c++/3.4/cxxabi.h
OLD_FILES+=usr/include/c++/3.4/debug/bitset
OLD_FILES+=usr/include/c++/3.4/debug/debug.h
OLD_FILES+=usr/include/c++/3.4/debug/deque
OLD_FILES+=usr/include/c++/3.4/debug/formatter.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_map
OLD_FILES+=usr/include/c++/3.4/debug/hash_map.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_multimap.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_multiset.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_set
OLD_FILES+=usr/include/c++/3.4/debug/hash_set.h
OLD_FILES+=usr/include/c++/3.4/debug/list
OLD_FILES+=usr/include/c++/3.4/debug/map
OLD_FILES+=usr/include/c++/3.4/debug/map.h
OLD_FILES+=usr/include/c++/3.4/debug/multimap.h
OLD_FILES+=usr/include/c++/3.4/debug/multiset.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_base.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.tcc
OLD_FILES+=usr/include/c++/3.4/debug/safe_sequence.h
OLD_FILES+=usr/include/c++/3.4/debug/set
OLD_FILES+=usr/include/c++/3.4/debug/set.h
OLD_FILES+=usr/include/c++/3.4/debug/string
OLD_FILES+=usr/include/c++/3.4/debug/vector
OLD_FILES+=usr/include/c++/3.4/deque
OLD_FILES+=usr/include/c++/3.4/exception
OLD_FILES+=usr/include/c++/3.4/exception_defines.h
OLD_FILES+=usr/include/c++/3.4/ext/algorithm
OLD_FILES+=usr/include/c++/3.4/ext/bitmap_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/debug_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.4/ext/functional
OLD_FILES+=usr/include/c++/3.4/ext/hash_fun.h
OLD_FILES+=usr/include/c++/3.4/ext/hash_map
OLD_FILES+=usr/include/c++/3.4/ext/hash_set
OLD_FILES+=usr/include/c++/3.4/ext/hashtable.h
OLD_FILES+=usr/include/c++/3.4/ext/iterator
OLD_FILES+=usr/include/c++/3.4/ext/malloc_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/memory
OLD_FILES+=usr/include/c++/3.4/ext/mt_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/new_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/numeric
OLD_FILES+=usr/include/c++/3.4/ext/pod_char_traits.h
OLD_FILES+=usr/include/c++/3.4/ext/pool_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/rb_tree
OLD_FILES+=usr/include/c++/3.4/ext/rope
OLD_FILES+=usr/include/c++/3.4/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.4/ext/slist
OLD_FILES+=usr/include/c++/3.4/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.4/ext/stdio_sync_filebuf.h
OLD_FILES+=usr/include/c++/3.4/fstream
OLD_FILES+=usr/include/c++/3.4/functional
OLD_FILES+=usr/include/c++/3.4/iomanip
OLD_FILES+=usr/include/c++/3.4/ios
OLD_FILES+=usr/include/c++/3.4/iosfwd
OLD_FILES+=usr/include/c++/3.4/iostream
OLD_FILES+=usr/include/c++/3.4/istream
OLD_FILES+=usr/include/c++/3.4/iterator
OLD_FILES+=usr/include/c++/3.4/limits
OLD_FILES+=usr/include/c++/3.4/list
OLD_FILES+=usr/include/c++/3.4/locale
OLD_FILES+=usr/include/c++/3.4/map
OLD_FILES+=usr/include/c++/3.4/memory
OLD_FILES+=usr/include/c++/3.4/new
OLD_FILES+=usr/include/c++/3.4/numeric
OLD_FILES+=usr/include/c++/3.4/ostream
OLD_FILES+=usr/include/c++/3.4/queue
OLD_FILES+=usr/include/c++/3.4/set
OLD_FILES+=usr/include/c++/3.4/sstream
OLD_FILES+=usr/include/c++/3.4/stack
OLD_FILES+=usr/include/c++/3.4/stdexcept
OLD_FILES+=usr/include/c++/3.4/streambuf
OLD_FILES+=usr/include/c++/3.4/string
OLD_FILES+=usr/include/c++/3.4/typeinfo
OLD_FILES+=usr/include/c++/3.4/utility
OLD_FILES+=usr/include/c++/3.4/valarray
OLD_FILES+=usr/include/c++/3.4/vector
OLD_DIRS+=usr/include/c++/3.4/backward
OLD_DIRS+=usr/include/c++/3.4/bits
OLD_DIRS+=usr/include/c++/3.4/debug
OLD_DIRS+=usr/include/c++/3.4/ext
OLD_DIRS+=usr/include/c++/3.4
# 20070510: zpool/zfs moved to /sbin
OLD_FILES+=usr/sbin/zfs
OLD_FILES+=usr/sbin/zpool
# 20070423: rc.bluetooth (examples) removed
OLD_FILES+=usr/share/examples/netgraph/bluetooth/rc.bluetooth
OLD_DIRS+=usr/share/examples/netgraph/bluetooth
# 20070421: worm.4 removed
OLD_FILES+=usr/share/man/man4/worm.4.gz
# 20070417: trunk(4) renamed to lagg(4)
OLD_FILES+=usr/include/net/if_trunk.h
# 20070409: uuidgen moved to /bin/
OLD_FILES+=usr/bin/uuidgen
# 20070328: bzip2 1.0.4
OLD_FILES+=usr/share/info/bzip2.info.gz
# 20070303: libarchive 2.0
OLD_LIBS+=usr/lib/libarchive.so.3
# 20070301: remove addr2ascii and ascii2addr
OLD_FILES+=usr/share/man/man3/addr2ascii.3.gz
OLD_FILES+=usr/share/man/man3/ascii2addr.3.gz
# 20070225: vm_page_unmanage() removed
OLD_FILES+=usr/share/man/man9/vm_page_unmanage.9.gz
# 20070216: VFS_VPTOFH(9) -> VOP_VPTOFH(9)
OLD_FILES+=usr/share/man/man9/VFS_VPTOFH.9.gz
# 20070212: kame.4 removed
OLD_FILES+=usr/share/man/man4/kame.4.gz
# 20070201: remove libmytinfo link
OLD_FILES+=usr/lib/libmytinfo.a
OLD_FILES+=usr/lib/libmytinfo.so
OLD_FILES+=usr/lib/libmytinfo_p.a
OLD_FILES+=usr/lib/libmytinfow.a
OLD_FILES+=usr/lib/libmytinfow.so
OLD_FILES+=usr/lib/libmytinfow_p.a
# 20070128: remove vnconfig
OLD_FILES+=usr/sbin/vnconfig
# 20070127: remove bpf_compat.h
OLD_FILES+=usr/include/net/bpf_compat.h
# 20070125: objformat bites the dust
OLD_FILES+=usr/bin/objformat
OLD_FILES+=usr/share/man/man1/objformat.1.gz
OLD_FILES+=usr/include/objformat.h
OLD_FILES+=usr/share/man/man3/getobjformat.3.gz
# 20061008: rename *.so.4 libalias modules to *.so and move to /lib
# This uses MOVED_LIBS because the new files are libraries even though
# the old files to remove are symlinks
MOVED_LIBS+=usr/lib/libalias_cuseeme.so
MOVED_LIBS+=usr/lib/libalias_dummy.so
MOVED_LIBS+=usr/lib/libalias_ftp.so
MOVED_LIBS+=usr/lib/libalias_irc.so
MOVED_LIBS+=usr/lib/libalias_nbt.so
MOVED_LIBS+=usr/lib/libalias_pptp.so
MOVED_LIBS+=usr/lib/libalias_skinny.so
MOVED_LIBS+=usr/lib/libalias_smedia.so
OLD_LIBS+=lib/libalias_cuseeme.so.4
OLD_LIBS+=lib/libalias_dummy.so.4
OLD_LIBS+=lib/libalias_ftp.so.4
OLD_LIBS+=lib/libalias_irc.so.4
OLD_LIBS+=lib/libalias_nbt.so.4
OLD_LIBS+=lib/libalias_pptp.so.4
OLD_LIBS+=lib/libalias_skinny.so.4
OLD_LIBS+=lib/libalias_smedia.so.4
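# Note (illustrative sketch, not an obsolete-file entry): the OLD_FILES and
# OLD_DIRS lists feed the check-old/delete-old targets, while OLD_LIBS and
# MOVED_LIBS feed delete-old-libs, which is run last so installed binaries
# keep a working library until they have been rebuilt or reinstalled:
#   make check-old        # report stale files, libraries, and directories
#   make delete-old       # remove stale files and directories
#   make delete-old-libs  # then remove stale libraries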
# 20061126: remove old man page
OLD_FILES+=usr/share/man/man3/archive_read_set_bytes_per_block.3.gz
# 20061125: remove old man page
OLD_FILES+=usr/share/man/man9/devsw.9.gz
# 20061122: remove obsolete mount programs
OLD_FILES+=sbin/mount_devfs
OLD_FILES+=sbin/mount_ext2fs
OLD_FILES+=sbin/mount_fdescfs
OLD_FILES+=sbin/mount_linprocfs
OLD_FILES+=sbin/mount_procfs
OLD_FILES+=sbin/mount_std
OLD_FILES+=rescue/mount_devfs
OLD_FILES+=rescue/mount_ext2fs
OLD_FILES+=rescue/mount_fdescfs
OLD_FILES+=rescue/mount_linprocfs
OLD_FILES+=rescue/mount_procfs
OLD_FILES+=rescue/mount_std
OLD_FILES+=usr/share/man/man8/mount_devfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_ext2fs.8.gz
OLD_FILES+=usr/share/man/man8/mount_fdescfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_linprocfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_procfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_std.8.gz
# 20061116: uhidev.4 removed
OLD_FILES+=usr/share/man/man4/uhidev.4.gz
# 20061106: archive_write_prepare.3 removed
OLD_FILES+=usr/share/man/man3/archive_write_prepare.3.gz
# 20061018: pccardc removed
OLD_FILES+=usr/sbin/pccardc
OLD_FILES+=usr/share/man/man8/pccardc.8.gz
# 20060930: demangle.h from contrib/libstdc++/include/ext/
OLD_FILES+=usr/include/c++/3.4/ext/demangle.h
# 20060929: mrouted removed
OLD_FILES+=usr/sbin/map-mbone
OLD_FILES+=usr/sbin/mrinfo
OLD_FILES+=usr/sbin/mrouted
OLD_FILES+=usr/sbin/mtrace
OLD_FILES+=usr/share/man/man8/map-mbone.8.gz
OLD_FILES+=usr/share/man/man8/mrinfo.8.gz
OLD_FILES+=usr/share/man/man8/mrouted.8.gz
OLD_FILES+=usr/share/man/man8/mtrace.8.gz
# 20060924: tcpslice removed
OLD_FILES+=usr/sbin/tcpslice
OLD_FILES+=usr/share/man/man1/tcpslice.1.gz
# 20060829: kvmdb cleanup script removed
OLD_FILES+=etc/periodic/weekly/120.clean-kvmdb
# 20060822: ramdisk{,-own} have been replaced by mdconfig{,2}
OLD_FILES+=etc/rc.d/ramdisk
OLD_FILES+=etc/rc.d/ramdisk-own
# 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade
OLD_FILES+=usr/include/openssl/eng_int.h
OLD_FILES+=usr/include/openssl/hw_4758_cca_err.h
OLD_FILES+=usr/include/openssl/hw_aep_err.h
OLD_FILES+=usr/include/openssl/hw_atalla_err.h
OLD_FILES+=usr/include/openssl/hw_cswift_err.h
OLD_FILES+=usr/include/openssl/hw_ncipher_err.h
OLD_FILES+=usr/include/openssl/hw_nuron_err.h
OLD_FILES+=usr/include/openssl/hw_sureware_err.h
OLD_FILES+=usr/include/openssl/hw_ubsec_err.h
# 20060713: mount_linsysfs(8) never existed in 7.x
OLD_FILES+=sbin/mount_linsysfs
OLD_FILES+=usr/share/man/man8/mount_linsysfs.8.gz
# 20060704: KAME compat file net_osdep.h removed
OLD_FILES+=usr/include/net/net_osdep.h
# 20060605: man page links removed by OpenBSM 1.0 alpha 6 import
OLD_FILES+=usr/share/man/man3/au_to_socket.3.gz
OLD_FILES+=usr/share/man/man3/au_to_socket_ex_128.3.gz
OLD_FILES+=usr/share/man/man3/au_to_socket_ex_32.3.gz
# 20060517: pcvt removed
OLD_FILES+=usr/share/pcvt/README.FIRST
OLD_FILES+=usr/share/pcvt/Etc/xmodmap-german
OLD_FILES+=usr/share/pcvt/Etc/pcvt.sh
OLD_FILES+=usr/share/pcvt/Etc/pcvt.el
OLD_FILES+=usr/share/pcvt/Etc/Terminfo
OLD_FILES+=usr/share/pcvt/Etc/Termcap
OLD_DIRS+=usr/share/pcvt/Etc
OLD_FILES+=usr/share/pcvt/Doc/NotesAndHints
OLD_FILES+=usr/share/pcvt/Doc/Keyboard.VT
OLD_FILES+=usr/share/pcvt/Doc/Keyboard.HP
OLD_FILES+=usr/share/pcvt/Doc/EscapeSequences
OLD_FILES+=usr/share/pcvt/Doc/Charsets
OLD_FILES+=usr/share/pcvt/Doc/CharGen
OLD_FILES+=usr/share/pcvt/Doc/Bibliography
OLD_FILES+=usr/share/pcvt/Doc/Acknowledgements
OLD_DIRS+=usr/share/pcvt/Doc
OLD_DIRS+=usr/share/pcvt
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.816
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.814
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.810
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.808
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.816
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.814
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.810
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.808
OLD_DIRS+=usr/share/misc/pcvtfonts
OLD_FILES+=usr/share/misc/keycap.pcvt
OLD_FILES+=usr/share/man/man8/ispcvt.8.gz
OLD_FILES+=usr/share/man/man5/keycap.5.gz
OLD_FILES+=usr/share/man/man4/pcvt.4.gz
OLD_FILES+=usr/share/man/man3/kgetstr.3.gz
OLD_FILES+=usr/share/man/man3/kgetnum.3.gz
OLD_FILES+=usr/share/man/man3/kgetflag.3.gz
OLD_FILES+=usr/share/man/man3/kgetent.3.gz
OLD_FILES+=usr/share/man/man3/keycap.3.gz
OLD_FILES+=usr/share/man/man1/vt220keys.1.gz
OLD_FILES+=usr/share/man/man1/scon.1.gz
OLD_FILES+=usr/share/man/man1/loadfont.1.gz
OLD_FILES+=usr/share/man/man1/kcon.1.gz
OLD_FILES+=usr/share/man/man1/fontedit.1.gz
OLD_FILES+=usr/share/man/man1/cursor.1.gz
OLD_FILES+=usr/sbin/vt220keys
OLD_FILES+=usr/sbin/scon
OLD_FILES+=usr/sbin/loadfont
OLD_FILES+=usr/sbin/kcon
OLD_FILES+=usr/sbin/ispcvt
OLD_FILES+=usr/sbin/fontedit
OLD_FILES+=usr/sbin/cursor
OLD_FILES+=usr/lib/libkeycap_p.a
OLD_FILES+=usr/lib/libkeycap.a
OLD_FILES+=usr/include/machine/pcvt_ioctl.h
# 20060514: lnc(4) replaced by le(4)
OLD_FILES+=usr/share/man/man4/i386/lnc.4.gz
# 20060512: remove ip6fw
OLD_FILES+=etc/periodic/security/600.ip6fwdenied
OLD_FILES+=etc/periodic/security/650.ip6fwlimit
OLD_FILES+=sbin/ip6fw
OLD_FILES+=usr/include/netinet6/ip6_fw.h
OLD_FILES+=usr/share/man/man8/ip6fw.8.gz
# 20060424: sab(4) removed
OLD_FILES+=usr/share/man/man4/sab.4.gz
# 20060328: remove redundant rc.d script
OLD_FILES+=etc/rc.d/ike
# 20060127: revert libdisk to static-only
OLD_FILES+=usr/lib/libdisk.so
# 20060115: sys/pccard includes cleanup
OLD_FILES+=usr/include/pccard/driver.h
OLD_FILES+=usr/include/pccard/i82365.h
OLD_FILES+=usr/include/pccard/meciareg.h
OLD_FILES+=usr/include/pccard/pccard_nbk.h
OLD_FILES+=usr/include/pccard/pcic_pci.h
OLD_FILES+=usr/include/pccard/pcicvar.h
OLD_FILES+=usr/include/pccard/slot.h
# 20051215: rescue/nextboot.sh renamed to rescue/nextboot
OLD_FILES+=rescue/nextboot.sh
# 20051214: usbd(8) removed
OLD_FILES+=etc/rc.d/usbd
OLD_FILES+=etc/usbd.conf
OLD_FILES+=usr/sbin/usbd
OLD_FILES+=usr/share/man/man8/usbd.8.gz
# 20051029: rc.d/ppp-user renamed to rc.d/ppp for convenience
OLD_FILES+=etc/rc.d/ppp-user
# 20051012: setkey(8) moved to /sbin/
OLD_FILES+=usr/sbin/setkey
# 20050930: pccardd(8) removed
OLD_FILES+=usr/sbin/pccardd
OLD_FILES+=usr/share/man/man5/pccard.conf.5.gz
OLD_FILES+=usr/share/man/man8/pccardd.8.gz
# 20050927: bridge(4) replaced by if_bridge(4)
OLD_FILES+=usr/include/net/bridge.h
# 20050831: not implemented
OLD_FILES+=usr/share/man/man3/getino.3.gz
OLD_FILES+=usr/share/man/man3/putino.3.gz
# 20050825: T/TCP retired several months ago
OLD_FILES+=usr/share/man/man4/ttcp.4.gz
# 20050805: tn3270 retired long ago
OLD_FILES+=usr/share/misc/map3270
# 20050801: too old to be interesting here
OLD_FILES+=usr/share/doc/papers/px.ps.gz
# 20050721: moved to ports
OLD_FILES+=usr/sbin/vttest
OLD_FILES+=usr/share/man/man1/vttest.1.gz
# 20050617: wpa man pages moved to section 8
OLD_FILES+=usr/share/man/man1/hostapd.1.gz
OLD_FILES+=usr/share/man/man1/hostapd_cli.1.gz
OLD_FILES+=usr/share/man/man1/wpa_cli.1.gz
OLD_FILES+=usr/share/man/man1/wpa_supplicant.1.gz
# 20050610: rexecd (insecure by design)
OLD_FILES+=etc/pam.d/rexecd
OLD_FILES+=usr/share/man/man8/rexecd.8.gz
OLD_FILES+=usr/libexec/rexecd
# 20050606: OpenBSD dhclient replaces ISC one
OLD_FILES+=bin/omshell
OLD_FILES+=sbin/omshell
OLD_FILES+=usr/share/man/man1/omshell.1.gz
OLD_FILES+=usr/share/man/man5/dhcp-eval.5.gz
# 200504XX: ipf tools moved from /usr to /
OLD_FILES+=rescue/ipfs
OLD_FILES+=rescue/ipfstat
OLD_FILES+=rescue/ipmon
OLD_FILES+=rescue/ipnat
OLD_FILES+=usr/sbin/ipftest
OLD_FILES+=usr/sbin/ipresend
OLD_FILES+=usr/sbin/ipsend
OLD_FILES+=usr/sbin/iptest
OLD_FILES+=usr/share/man/man1/ipnat.1.gz
OLD_FILES+=usr/share/man/man1/ipsend.1.gz
OLD_FILES+=usr/share/man/man1/iptest.1.gz
OLD_FILES+=usr/share/man/man5/ipsend.5.gz
# 200503XX: bsdtar takes over gtar
OLD_FILES+=usr/bin/gtar
OLD_FILES+=usr/share/man/man1/gtar.1.gz
# 200503XX
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
# 20050324: updated release infrastructure
OLD_FILES+=usr/share/man/man5/drivers.conf.5.gz
# 20050317: removed from BIND 9 distribution
OLD_FILES+=usr/share/doc/bind9/KNOWN_DEFECTS
# 2005XXXX:
OLD_FILES+=sbin/mount_autofs
OLD_FILES+=usr/lib/libautofs.a
OLD_FILES+=usr/lib/libautofs.so
OLD_FILES+=usr/share/man/man8/mount_autofs.8.gz
# 20050203: Merged with fortunes
OLD_FILES+=usr/share/games/fortune/fortunes2
OLD_FILES+=usr/share/games/fortune/fortunes2.dat
# 200501XX:
OLD_FILES+=usr/libexec/getNAME
# 200411XX: gvinum replaces vinum
OLD_FILES+=bin/vinum
OLD_FILES+=rescue/vinum
OLD_FILES+=sbin/vinum
OLD_FILES+=usr/share/man/man8/vinum.8.gz
# 200411XX: libxpg4 removal
OLD_FILES+=usr/lib/libxpg4.a
OLD_FILES+=usr/lib/libxpg4.so
OLD_FILES+=usr/lib/libxpg4_p.a
# 20041109: replaced by em(4)
OLD_FILES+=usr/share/man/man4/gx.4.gz
OLD_FILES+=usr/share/man/man4/if_gx.4.gz
# 20041017: rune interface removed
OLD_FILES+=usr/include/rune.h
OLD_FILES+=usr/share/man/man3/fgetrune.3.gz
OLD_FILES+=usr/share/man/man3/fputrune.3.gz
OLD_FILES+=usr/share/man/man3/fungetrune.3.gz
OLD_FILES+=usr/share/man/man3/mbrrune.3.gz
OLD_FILES+=usr/share/man/man3/mbrune.3.gz
OLD_FILES+=usr/share/man/man3/rune.3.gz
OLD_FILES+=usr/share/man/man3/setinvalidrune.3.gz
OLD_FILES+=usr/share/man/man3/sgetrune.3.gz
OLD_FILES+=usr/share/man/man3/sputrune.3.gz
# 20040925: bind9 import
OLD_FILES+=usr/bin/dnskeygen
OLD_FILES+=usr/bin/dnsquery
OLD_FILES+=usr/lib/libisc.a
OLD_FILES+=usr/lib/libisc.so
OLD_FILES+=usr/lib/libisc_p.a
OLD_FILES+=usr/libexec/named-xfer
OLD_FILES+=usr/sbin/named.restart
OLD_FILES+=usr/sbin/ndc
OLD_FILES+=usr/sbin/nslookup
OLD_FILES+=usr/sbin/nsupdate
OLD_FILES+=usr/share/doc/bind/html/acl.html
OLD_FILES+=usr/share/doc/bind/html/address_list.html
OLD_FILES+=usr/share/doc/bind/html/comments.html
OLD_FILES+=usr/share/doc/bind/html/config.html
OLD_FILES+=usr/share/doc/bind/html/controls.html
OLD_FILES+=usr/share/doc/bind/html/docdef.html
OLD_FILES+=usr/share/doc/bind/html/example.html
OLD_FILES+=usr/share/doc/bind/html/include.html
OLD_FILES+=usr/share/doc/bind/html/index.html
OLD_FILES+=usr/share/doc/bind/html/key.html
OLD_FILES+=usr/share/doc/bind/html/logging.html
OLD_FILES+=usr/share/doc/bind/html/master.html
OLD_FILES+=usr/share/doc/bind/html/options.html
OLD_FILES+=usr/share/doc/bind/html/server.html
OLD_FILES+=usr/share/doc/bind/html/trusted-keys.html
OLD_FILES+=usr/share/doc/bind/html/zone.html
OLD_FILES+=usr/share/doc/bind/misc/DynamicUpdate
OLD_FILES+=usr/share/doc/bind/misc/FAQ.1of2
OLD_FILES+=usr/share/doc/bind/misc/FAQ.2of2
OLD_FILES+=usr/share/doc/bind/misc/rfc2317-notes.txt
OLD_FILES+=usr/share/doc/bind/misc/style.txt
OLD_FILES+=usr/share/man/man1/dnskeygen.1.gz
OLD_FILES+=usr/share/man/man1/dnsquery.1.gz
OLD_FILES+=usr/share/man/man8/named-bootconf.8.gz
OLD_FILES+=usr/share/man/man8/named-xfer.8.gz
OLD_FILES+=usr/share/man/man8/named.restart.8.gz
OLD_FILES+=usr/share/man/man8/ndc.8.gz
OLD_FILES+=usr/share/man/man8/nslookup.8.gz
# 200409XX
OLD_FILES+=usr/share/man/man3/ENSURE.3.gz
OLD_FILES+=usr/share/man/man3/ENSURE_ERR.3.gz
OLD_FILES+=usr/share/man/man3/INSIST.3.gz
OLD_FILES+=usr/share/man/man3/INSIST_ERR.3.gz
OLD_FILES+=usr/share/man/man3/INVARIANT.3.gz
OLD_FILES+=usr/share/man/man3/INVARIANT_ERR.3.gz
OLD_FILES+=usr/share/man/man3/REQUIRE.3.gz
OLD_FILES+=usr/share/man/man3/REQUIRE_ERR.3.gz
OLD_FILES+=usr/share/man/man3/assertion_type_to_text.3.gz
OLD_FILES+=usr/share/man/man3/assertions.3.gz
OLD_FILES+=usr/share/man/man3/bitncmp.3.gz
OLD_FILES+=usr/share/man/man3/evAddTime.3.gz
OLD_FILES+=usr/share/man/man3/evCancelConn.3.gz
OLD_FILES+=usr/share/man/man3/evCancelRW.3.gz
OLD_FILES+=usr/share/man/man3/evClearIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evClearTimer.3.gz
OLD_FILES+=usr/share/man/man3/evCmpTime.3.gz
OLD_FILES+=usr/share/man/man3/evConnFunc.3.gz
OLD_FILES+=usr/share/man/man3/evConnect.3.gz
OLD_FILES+=usr/share/man/man3/evConsIovec.3.gz
OLD_FILES+=usr/share/man/man3/evConsTime.3.gz
OLD_FILES+=usr/share/man/man3/evCreate.3.gz
OLD_FILES+=usr/share/man/man3/evDefer.3.gz
OLD_FILES+=usr/share/man/man3/evDeselectFD.3.gz
OLD_FILES+=usr/share/man/man3/evDestroy.3.gz
OLD_FILES+=usr/share/man/man3/evDispatch.3.gz
OLD_FILES+=usr/share/man/man3/evDo.3.gz
OLD_FILES+=usr/share/man/man3/evDrop.3.gz
OLD_FILES+=usr/share/man/man3/evFileFunc.3.gz
OLD_FILES+=usr/share/man/man3/evGetNext.3.gz
OLD_FILES+=usr/share/man/man3/evHold.3.gz
OLD_FILES+=usr/share/man/man3/evInitID.3.gz
OLD_FILES+=usr/share/man/man3/evLastEventTime.3.gz
OLD_FILES+=usr/share/man/man3/evListen.3.gz
OLD_FILES+=usr/share/man/man3/evMainLoop.3.gz
OLD_FILES+=usr/share/man/man3/evNowTime.3.gz
OLD_FILES+=usr/share/man/man3/evPrintf.3.gz
OLD_FILES+=usr/share/man/man3/evRead.3.gz
OLD_FILES+=usr/share/man/man3/evResetTimer.3.gz
OLD_FILES+=usr/share/man/man3/evSelectFD.3.gz
OLD_FILES+=usr/share/man/man3/evSetDebug.3.gz
OLD_FILES+=usr/share/man/man3/evSetIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evSetTimer.3.gz
OLD_FILES+=usr/share/man/man3/evStreamFunc.3.gz
OLD_FILES+=usr/share/man/man3/evSubTime.3.gz
OLD_FILES+=usr/share/man/man3/evTestID.3.gz
OLD_FILES+=usr/share/man/man3/evTimeRW.3.gz
OLD_FILES+=usr/share/man/man3/evTimeSpec.3.gz
OLD_FILES+=usr/share/man/man3/evTimeVal.3.gz
OLD_FILES+=usr/share/man/man3/evTimerFunc.3.gz
OLD_FILES+=usr/share/man/man3/evTouchIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evTryAccept.3.gz
OLD_FILES+=usr/share/man/man3/evUnhold.3.gz
OLD_FILES+=usr/share/man/man3/evUntimeRW.3.gz
OLD_FILES+=usr/share/man/man3/evUnwait.3.gz
OLD_FILES+=usr/share/man/man3/evWaitFor.3.gz
OLD_FILES+=usr/share/man/man3/evWaitFunc.3.gz
OLD_FILES+=usr/share/man/man3/evWrite.3.gz
OLD_FILES+=usr/share/man/man3/eventlib.3.gz
OLD_FILES+=usr/share/man/man3/heap.3.gz
OLD_FILES+=usr/share/man/man3/heap_decreased.3.gz
OLD_FILES+=usr/share/man/man3/heap_delete.3.gz
OLD_FILES+=usr/share/man/man3/heap_element.3.gz
OLD_FILES+=usr/share/man/man3/heap_for_each.3.gz
OLD_FILES+=usr/share/man/man3/heap_free.3.gz
OLD_FILES+=usr/share/man/man3/heap_increased.3.gz
OLD_FILES+=usr/share/man/man3/heap_insert.3.gz
OLD_FILES+=usr/share/man/man3/heap_new.3.gz
OLD_FILES+=usr/share/man/man3/log_add_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_category_is_active.3.gz
OLD_FILES+=usr/share/man/man3/log_close_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_dec_references.3.gz
OLD_FILES+=usr/share/man/man3/log_free_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_free_context.3.gz
OLD_FILES+=usr/share/man/man3/log_get_filename.3.gz
OLD_FILES+=usr/share/man/man3/log_get_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_inc_references.3.gz
OLD_FILES+=usr/share/man/man3/log_new_context.3.gz
OLD_FILES+=usr/share/man/man3/log_new_file_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_new_null_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_new_syslog_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_open_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_option.3.gz
OLD_FILES+=usr/share/man/man3/log_remove_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_set_file_owner.3.gz
OLD_FILES+=usr/share/man/man3/log_vwrite.3.gz
OLD_FILES+=usr/share/man/man3/log_write.3.gz
OLD_FILES+=usr/share/man/man3/logging.3.gz
OLD_FILES+=usr/share/man/man3/memcluster.3.gz
OLD_FILES+=usr/share/man/man3/memget.3.gz
OLD_FILES+=usr/share/man/man3/memput.3.gz
OLD_FILES+=usr/share/man/man3/memstats.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/sigwait.3.gz
OLD_FILES+=usr/share/man/man3/tree_add.3.gz
OLD_FILES+=usr/share/man/man3/tree_delete.3.gz
OLD_FILES+=usr/share/man/man3/tree_init.3.gz
OLD_FILES+=usr/share/man/man3/tree_mung.3.gz
OLD_FILES+=usr/share/man/man3/tree_srch.3.gz
OLD_FILES+=usr/share/man/man3/tree_trav.3.gz
# 2004XXYY: OS internal libs; no ports use them, so the usual OLD_LIBS
# grace period is not strictly needed
OLD_LIBS+=lib/geom/geom_concat.so.1
OLD_LIBS+=lib/geom/geom_label.so.1
OLD_LIBS+=lib/geom/geom_nop.so.1
OLD_LIBS+=lib/geom/geom_stripe.so.1
# 20040728: GCC 3.4.2
OLD_DIRS+=usr/include/c++/3.3
OLD_FILES+=usr/include/c++/3.3/FlexLexer.h
OLD_FILES+=usr/include/c++/3.3/algorithm
OLD_FILES+=usr/include/c++/3.3/backward/algo.h
OLD_FILES+=usr/include/c++/3.3/backward/algobase.h
OLD_FILES+=usr/include/c++/3.3/backward/alloc.h
OLD_FILES+=usr/include/c++/3.3/backward/backward_warning.h
OLD_FILES+=usr/include/c++/3.3/backward/bvector.h
OLD_FILES+=usr/include/c++/3.3/backward/complex.h
OLD_FILES+=usr/include/c++/3.3/backward/defalloc.h
OLD_FILES+=usr/include/c++/3.3/backward/deque.h
OLD_FILES+=usr/include/c++/3.3/backward/fstream.h
OLD_FILES+=usr/include/c++/3.3/backward/function.h
OLD_FILES+=usr/include/c++/3.3/backward/hash_map.h
OLD_FILES+=usr/include/c++/3.3/backward/hash_set.h
OLD_FILES+=usr/include/c++/3.3/backward/hashtable.h
OLD_FILES+=usr/include/c++/3.3/backward/heap.h
OLD_FILES+=usr/include/c++/3.3/backward/iomanip.h
OLD_FILES+=usr/include/c++/3.3/backward/iostream.h
OLD_FILES+=usr/include/c++/3.3/backward/istream.h
OLD_FILES+=usr/include/c++/3.3/backward/iterator.h
OLD_FILES+=usr/include/c++/3.3/backward/list.h
OLD_FILES+=usr/include/c++/3.3/backward/map.h
OLD_FILES+=usr/include/c++/3.3/backward/multimap.h
OLD_FILES+=usr/include/c++/3.3/backward/multiset.h
OLD_FILES+=usr/include/c++/3.3/backward/new.h
OLD_FILES+=usr/include/c++/3.3/backward/ostream.h
OLD_FILES+=usr/include/c++/3.3/backward/pair.h
OLD_FILES+=usr/include/c++/3.3/backward/queue.h
OLD_FILES+=usr/include/c++/3.3/backward/rope.h
OLD_FILES+=usr/include/c++/3.3/backward/set.h
OLD_FILES+=usr/include/c++/3.3/backward/slist.h
OLD_FILES+=usr/include/c++/3.3/backward/stack.h
OLD_FILES+=usr/include/c++/3.3/backward/stream.h
OLD_FILES+=usr/include/c++/3.3/backward/streambuf.h
OLD_FILES+=usr/include/c++/3.3/backward/strstream
OLD_FILES+=usr/include/c++/3.3/backward/strstream.h
OLD_FILES+=usr/include/c++/3.3/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.3/backward/tree.h
OLD_FILES+=usr/include/c++/3.3/backward/vector.h
OLD_DIRS+=usr/include/c++/3.3/backward
OLD_FILES+=usr/include/c++/3.3/bits/atomicity.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_file.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/3.3/bits/basic_string.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/3.3/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/3.3/bits/c++config.h
OLD_FILES+=usr/include/c++/3.3/bits/c++io.h
OLD_FILES+=usr/include/c++/3.3/bits/c++locale.h
OLD_FILES+=usr/include/c++/3.3/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/3.3/bits/char_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/cmath.tcc
OLD_FILES+=usr/include/c++/3.3/bits/codecvt.h
OLD_FILES+=usr/include/c++/3.3/bits/codecvt_specializations.h
OLD_FILES+=usr/include/c++/3.3/bits/concept_check.h
OLD_FILES+=usr/include/c++/3.3/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_base.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/3.3/bits/deque.tcc
OLD_FILES+=usr/include/c++/3.3/bits/fpos.h
OLD_FILES+=usr/include/c++/3.3/bits/fstream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/functexcept.h
OLD_FILES+=usr/include/c++/3.3/bits/generic_shadow.h
OLD_FILES+=usr/include/c++/3.3/bits/gslice.h
OLD_FILES+=usr/include/c++/3.3/bits/gslice_array.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-default.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-single.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr.h
OLD_FILES+=usr/include/c++/3.3/bits/indirect_array.h
OLD_FILES+=usr/include/c++/3.3/bits/ios_base.h
OLD_FILES+=usr/include/c++/3.3/bits/istream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/list.tcc
OLD_FILES+=usr/include/c++/3.3/bits/locale_classes.h
OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.h
OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/3.3/bits/localefwd.h
OLD_FILES+=usr/include/c++/3.3/bits/mask_array.h
OLD_FILES+=usr/include/c++/3.3/bits/messages_members.h
OLD_FILES+=usr/include/c++/3.3/bits/os_defines.h
OLD_FILES+=usr/include/c++/3.3/bits/ostream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/pthread_allocimpl.h
OLD_FILES+=usr/include/c++/3.3/bits/slice.h
OLD_FILES+=usr/include/c++/3.3/bits/slice_array.h
OLD_FILES+=usr/include/c++/3.3/bits/sstream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/stl_algo.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_alloc.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_construct.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_deque.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_function.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_heap.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_list.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_map.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_pair.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_pthread_alloc.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_queue.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_relops.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.3/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.3/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.3/bits/time_members.h
OLD_FILES+=usr/include/c++/3.3/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.3/bits/valarray_meta.h
OLD_FILES+=usr/include/c++/3.3/bits/vector.tcc
OLD_DIRS+=usr/include/c++/3.3/bits
OLD_FILES+=usr/include/c++/3.3/bitset
OLD_FILES+=usr/include/c++/3.3/cassert
OLD_FILES+=usr/include/c++/3.3/cctype
OLD_FILES+=usr/include/c++/3.3/cerrno
OLD_FILES+=usr/include/c++/3.3/cfloat
OLD_FILES+=usr/include/c++/3.3/ciso646
OLD_FILES+=usr/include/c++/3.3/climits
OLD_FILES+=usr/include/c++/3.3/clocale
OLD_FILES+=usr/include/c++/3.3/cmath
OLD_FILES+=usr/include/c++/3.3/complex
OLD_FILES+=usr/include/c++/3.3/csetjmp
OLD_FILES+=usr/include/c++/3.3/csignal
OLD_FILES+=usr/include/c++/3.3/cstdarg
OLD_FILES+=usr/include/c++/3.3/cstddef
OLD_FILES+=usr/include/c++/3.3/cstdio
OLD_FILES+=usr/include/c++/3.3/cstdlib
OLD_FILES+=usr/include/c++/3.3/cstring
OLD_FILES+=usr/include/c++/3.3/ctime
OLD_FILES+=usr/include/c++/3.3/cwchar
OLD_FILES+=usr/include/c++/3.3/cwctype
OLD_FILES+=usr/include/c++/3.3/cxxabi.h
OLD_FILES+=usr/include/c++/3.3/deque
OLD_FILES+=usr/include/c++/3.3/exception
OLD_FILES+=usr/include/c++/3.3/exception_defines.h
OLD_FILES+=usr/include/c++/3.3/ext/algorithm
OLD_FILES+=usr/include/c++/3.3/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/functional
OLD_FILES+=usr/include/c++/3.3/ext/hash_map
OLD_FILES+=usr/include/c++/3.3/ext/hash_set
OLD_FILES+=usr/include/c++/3.3/ext/iterator
OLD_FILES+=usr/include/c++/3.3/ext/memory
OLD_FILES+=usr/include/c++/3.3/ext/numeric
OLD_FILES+=usr/include/c++/3.3/ext/rb_tree
OLD_FILES+=usr/include/c++/3.3/ext/rope
OLD_FILES+=usr/include/c++/3.3/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.3/ext/slist
OLD_FILES+=usr/include/c++/3.3/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hash_fun.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hashtable.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_rope.h
OLD_DIRS+=usr/include/c++/3.3/ext
OLD_FILES+=usr/include/c++/3.3/fstream
OLD_FILES+=usr/include/c++/3.3/functional
OLD_FILES+=usr/include/c++/3.3/iomanip
OLD_FILES+=usr/include/c++/3.3/ios
OLD_FILES+=usr/include/c++/3.3/iosfwd
OLD_FILES+=usr/include/c++/3.3/iostream
OLD_FILES+=usr/include/c++/3.3/istream
OLD_FILES+=usr/include/c++/3.3/iterator
OLD_FILES+=usr/include/c++/3.3/limits
OLD_FILES+=usr/include/c++/3.3/list
OLD_FILES+=usr/include/c++/3.3/locale
OLD_FILES+=usr/include/c++/3.3/map
OLD_FILES+=usr/include/c++/3.3/memory
OLD_FILES+=usr/include/c++/3.3/new
OLD_FILES+=usr/include/c++/3.3/numeric
OLD_FILES+=usr/include/c++/3.3/ostream
OLD_FILES+=usr/include/c++/3.3/queue
OLD_FILES+=usr/include/c++/3.3/set
OLD_FILES+=usr/include/c++/3.3/sstream
OLD_FILES+=usr/include/c++/3.3/stack
OLD_FILES+=usr/include/c++/3.3/stdexcept
OLD_FILES+=usr/include/c++/3.3/streambuf
OLD_FILES+=usr/include/c++/3.3/string
OLD_FILES+=usr/include/c++/3.3/typeinfo
OLD_FILES+=usr/include/c++/3.3/utility
OLD_FILES+=usr/include/c++/3.3/valarray
OLD_FILES+=usr/include/c++/3.3/vector
# 20040713: fla(4) removed
OLD_FILES+=usr/share/man/man4/fla.4.gz
# 200407XX
OLD_FILES+=usr/sbin/kernbb
OLD_FILES+=usr/sbin/ntp-genkeys
OLD_FILES+=usr/sbin/ntptimeset
OLD_FILES+=usr/share/man/man8/kernbb.8.gz
OLD_FILES+=usr/share/man/man8/ntp-genkeys.8.gz
# 20040627: usbdevs.h and usbdevs_data.h removal
OLD_FILES+=usr/include/dev/usb/usbdevs.h
OLD_FILES+=usr/include/dev/usb/usbdevs_data.h
# 200406XX
OLD_FILES+=usr/bin/gasp
OLD_FILES+=usr/bin/gdbreplay
OLD_FILES+=usr/share/man/man1/gasp.1.gz
OLD_FILES+=sbin/mountd
OLD_FILES+=sbin/mount_fdesc
OLD_FILES+=sbin/mount_umap
OLD_FILES+=sbin/mount_union
OLD_FILES+=sbin/mount_msdos
OLD_FILES+=sbin/mount_null
OLD_FILES+=sbin/mount_kernfs
# 200405XX: arl
OLD_FILES+=usr/sbin/arlconfig
OLD_FILES+=usr/share/man/man8/arlconfig.8.gz
# 200403XX
OLD_FILES+=bin/raidctl
OLD_FILES+=sbin/raidctl
OLD_FILES+=usr/bin/sasc
OLD_FILES+=usr/sbin/sgsc
OLD_FILES+=usr/sbin/stlload
OLD_FILES+=usr/sbin/stlstats
OLD_FILES+=usr/share/man/man1/sasc.1.gz
OLD_FILES+=usr/share/man/man1/sgsc.1.gz
OLD_FILES+=usr/share/man/man4/i386/stl.4.gz
OLD_FILES+=usr/share/man/man8/raidctl.8.gz
# 20040229: clean_environment() was removed after 3 days
OLD_FILES+=usr/share/man/man3/clean_environment.3.gz
# 20040119: installed as `isdntel' in newer systems
OLD_FILES+=etc/isdn/isdntel.sh
# 200XYYZZ: /lib transition glitches
OLD_FILES+=lib/libalias.so
OLD_FILES+=lib/libatm.so
OLD_FILES+=lib/libbsdxml.so
OLD_FILES+=lib/libc.so
OLD_FILES+=lib/libcam.so
OLD_FILES+=lib/libcrypt.so
OLD_FILES+=lib/libcrypto.so
OLD_FILES+=lib/libdevstat.so
OLD_FILES+=lib/libedit.so
OLD_FILES+=lib/libgeom.so
OLD_FILES+=lib/libipsec.so
OLD_FILES+=lib/libipx.so
OLD_FILES+=lib/libkvm.so
OLD_FILES+=lib/libm.so
OLD_FILES+=lib/libmd.so
OLD_FILES+=lib/libncurses.so
OLD_FILES+=lib/libreadline.so
OLD_FILES+=lib/libsbuf.so
OLD_FILES+=lib/libufs.so
OLD_FILES+=lib/libz.so
# 200312XX
OLD_FILES+=bin/cxconfig
OLD_FILES+=sbin/cxconfig
OLD_FILES+=usr/share/man/man8/cxconfig.8.gz
# 20031016: MULTI_DRIVER_MODULE macro removed
OLD_FILES+=usr/share/man/man9/MULTI_DRIVER_MODULE.9.gz
# 200309XX
OLD_FILES+=usr/bin/symorder
OLD_FILES+=usr/share/man/man1/symorder.1.gz
# 200308XX
OLD_FILES+=usr/sbin/amldb
OLD_FILES+=usr/share/man/man8/amldb.8.gz
# 200307XX
OLD_FILES+=sbin/mount_nwfs
OLD_FILES+=sbin/mount_portalfs
OLD_FILES+=sbin/mount_smbfs
# 200306XX
OLD_FILES+=usr/sbin/dev_mkdb
OLD_FILES+=usr/share/man/man8/dev_mkdb.8.gz
# 200304XX
OLD_FILES+=usr/lib/libcipher.a
OLD_FILES+=usr/lib/libcipher.so
OLD_FILES+=usr/lib/libcipher_p.a
OLD_FILES+=usr/lib/libgmp.a
OLD_FILES+=usr/lib/libgmp.so
OLD_FILES+=usr/lib/libgmp_p.a
OLD_FILES+=usr/lib/libperl.a
OLD_FILES+=usr/lib/libperl.so
OLD_FILES+=usr/lib/libperl_p.a
OLD_FILES+=usr/lib/libposix1e.a
OLD_FILES+=usr/lib/libposix1e.so
OLD_FILES+=usr/lib/libposix1e_p.a
OLD_FILES+=usr/lib/libskey.a
OLD_FILES+=usr/lib/libskey.so
OLD_FILES+=usr/lib/libskey_p.a
OLD_FILES+=usr/libexec/tradcpp0
OLD_FILES+=usr/libexec/cpp0
# 200304XX: removal of xten
OLD_FILES+=usr/libexec/xtend
OLD_FILES+=usr/sbin/xten
OLD_FILES+=usr/share/man/man1/xten.1.gz
OLD_FILES+=usr/share/man/man8/xtend.8.gz
# 200303XX
OLD_FILES+=usr/lib/libacl.so
OLD_FILES+=usr/lib/libdescrypt.so
OLD_FILES+=usr/lib/libf2c.so
OLD_FILES+=usr/lib/libg++.so
OLD_FILES+=usr/lib/libkdb.so
OLD_FILES+=usr/lib/librsaINTL.so
OLD_FILES+=usr/lib/libscrypt.so
OLD_FILES+=usr/lib/libss.so
# 200302XX
OLD_FILES+=usr/lib/libacl.a
OLD_FILES+=usr/lib/libacl_p.a
OLD_FILES+=usr/lib/libkadm.a
OLD_FILES+=usr/lib/libkadm.so
OLD_FILES+=usr/lib/libkadm_p.a
OLD_FILES+=usr/lib/libkafs.a
OLD_FILES+=usr/lib/libkafs.so
OLD_FILES+=usr/lib/libkafs_p.a
OLD_FILES+=usr/lib/libkdb.a
OLD_FILES+=usr/lib/libkdb_p.a
OLD_FILES+=usr/lib/libkrb.a
OLD_FILES+=usr/lib/libkrb.so
OLD_FILES+=usr/lib/libkrb_p.a
OLD_FILES+=usr/share/man/man3/SSL_CIPHER_get_name.3.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_extra_chain_cert.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_ctrl.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_flush_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_verify_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_load_verify_locations.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_number.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_cache_size.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_get_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_store.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_verify_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cipher_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_cert_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_default_passwd_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_generate_session_id.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_info_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_max_cert_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_msg_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_options.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_quiet_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_cache_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_id_context.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_ssl_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_dh_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_verify.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_use_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_time.3.gz
OLD_FILES+=usr/share/man/man3/SSL_accept.3.gz
OLD_FILES+=usr/share/man/man3/SSL_alert_type_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_clear.3.gz
OLD_FILES+=usr/share/man/man3/SSL_connect.3.gz
OLD_FILES+=usr/share/man/man3/SSL_do_handshake.3.gz
OLD_FILES+=usr/share/man/man3/SSL_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_SSL_CTX.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ciphers.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_current_cipher.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_default_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_error.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_data_X509_STORE_CTX_idx.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_cert_chain.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_rbio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_library_init.3.gz
OLD_FILES+=usr/share/man/man3/SSL_load_client_CA_file.3.gz
OLD_FILES+=usr/share/man/man3/SSL_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_pending.3.gz
OLD_FILES+=usr/share/man/man3/SSL_read.3.gz
OLD_FILES+=usr/share/man/man3/SSL_rstate_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_session_reused.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_bio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_connect_state.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_state_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_want.3.gz
OLD_FILES+=usr/share/man/man3/SSL_write.3.gz
OLD_FILES+=usr/share/man/man3/d2i_SSL_SESSION.3.gz
# 200301XX
OLD_FILES+=usr/share/man/man3/des_3cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_3ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cfb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_read.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_write.3.gz
OLD_FILES+=usr/share/man/man3/des_is_weak_key.3.gz
OLD_FILES+=usr/share/man/man3/des_key_sched.3.gz
OLD_FILES+=usr/share/man/man3/des_ofb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_pcbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_quad_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_random_key.3.gz
OLD_FILES+=usr/share/man/man3/des_read_2password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/man/man3/des_set_key.3.gz
OLD_FILES+=usr/share/man/man3/des_set_odd_parity.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_2key.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_key.3.gz
# 200212XX
OLD_FILES+=usr/sbin/kenv
OLD_FILES+=usr/bin/kenv
OLD_FILES+=usr/sbin/elf2aout
# 200210XX
OLD_FILES+=usr/include/libusbhid.h
OLD_FILES+=usr/share/man/man3/All_FreeBSD.3.gz
OLD_FILES+=usr/share/man/man3/CheckRules.3.gz
OLD_FILES+=usr/share/man/man3/ChunkCanBeRoot.3.gz
OLD_FILES+=usr/share/man/man3/Clone_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk_DWIM.3.gz
OLD_FILES+=usr/share/man/man3/Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Debug_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Delete_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Disk_Names.3.gz
OLD_FILES+=usr/share/man/man3/Free_Disk.3.gz
OLD_FILES+=usr/share/man/man3/MakeDev.3.gz
OLD_FILES+=usr/share/man/man3/MakeDevDisk.3.gz
OLD_FILES+=usr/share/man/man3/Next_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Next_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Open_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Set_Bios_Geom.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Blocks.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Mgr.3.gz
OLD_FILES+=usr/share/man/man3/ShowChunkFlags.3.gz
OLD_FILES+=usr/share/man/man3/Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Write_Disk.3.gz
OLD_FILES+=usr/share/man/man3/slice_type_name.3.gz
# 200210XX: most games moved to ports
OLD_FILES+=usr/share/man/man6/adventure.6.gz
OLD_FILES+=usr/share/man/man6/arithmetic.6.gz
OLD_FILES+=usr/share/man/man6/atc.6.gz
OLD_FILES+=usr/share/man/man6/backgammon.6.gz
OLD_FILES+=usr/share/man/man6/battlestar.6.gz
OLD_FILES+=usr/share/man/man6/bs.6.gz
OLD_FILES+=usr/share/man/man6/canfield.6.gz
OLD_FILES+=usr/share/man/man6/cfscores.6.gz
OLD_FILES+=usr/share/man/man6/cribbage.6.gz
OLD_FILES+=usr/share/man/man6/fish.6.gz
OLD_FILES+=usr/share/man/man6/hack.6.gz
OLD_FILES+=usr/share/man/man6/hangman.6.gz
OLD_FILES+=usr/share/man/man6/larn.6.gz
OLD_FILES+=usr/share/man/man6/mille.6.gz
OLD_FILES+=usr/share/man/man6/phantasia.6.gz
OLD_FILES+=usr/share/man/man6/piano.6.gz
OLD_FILES+=usr/share/man/man6/pig.6.gz
OLD_FILES+=usr/share/man/man6/quiz.6.gz
OLD_FILES+=usr/share/man/man6/rain.6.gz
OLD_FILES+=usr/share/man/man6/robots.6.gz
OLD_FILES+=usr/share/man/man6/rogue.6.gz
OLD_FILES+=usr/share/man/man6/sail.6.gz
OLD_FILES+=usr/share/man/man6/snake.6.gz
OLD_FILES+=usr/share/man/man6/snscore.6.gz
OLD_FILES+=usr/share/man/man6/trek.6.gz
OLD_FILES+=usr/share/man/man6/wargames.6.gz
OLD_FILES+=usr/share/man/man6/worm.6.gz
OLD_FILES+=usr/share/man/man6/worms.6.gz
OLD_FILES+=usr/share/man/man6/wump.6.gz
# 200207XX
OLD_FILES+=usr/share/man/man1aout/ar.1aout.gz
OLD_FILES+=usr/share/man/man1aout/as.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ld.1aout.gz
OLD_FILES+=usr/share/man/man1aout/nm.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ranlib.1aout.gz
OLD_FILES+=usr/share/man/man1aout/size.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strings.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strip.1aout.gz
OLD_FILES+=bin/mountd
OLD_FILES+=bin/nfsd
# 20020707: sbin/nfsd -> usr.sbin/nfsd
OLD_FILES+=sbin/nfsd
# 200206XX
OLD_FILES+=usr/lib/libpam_ssh.a
OLD_FILES+=usr/lib/libpam_ssh_p.a
OLD_FILES+=usr/bin/help
OLD_FILES+=usr/bin/sccs
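# gdbserver is still provided on amd64, arm, i386 and powerpc, so it is
# only listed as obsolete for the remaining targets.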
.if ${TARGET_ARCH} != "amd64" && ${TARGET} != "arm" && ${TARGET_ARCH} != "i386" && ${TARGET} != "powerpc"
OLD_FILES+=usr/bin/gdbserver
.endif
OLD_FILES+=usr/bin/ssh-keysign
OLD_FILES+=usr/sbin/gifconfig
OLD_FILES+=usr/sbin/prefix
# 200205XX
OLD_FILES+=usr/bin/doscmd
# 200204XX
OLD_FILES+=usr/bin/a2p
OLD_FILES+=usr/bin/ptx
OLD_FILES+=usr/bin/pod2text
OLD_FILES+=usr/bin/pod2man
OLD_FILES+=usr/bin/pod2latex
OLD_FILES+=usr/bin/pod2html
OLD_FILES+=usr/bin/h2ph
OLD_FILES+=usr/bin/dprofpp
OLD_FILES+=usr/bin/c2ph
OLD_FILES+=usr/bin/h2xs
OLD_FILES+=usr/bin/pl2pm
OLD_FILES+=usr/bin/splain
OLD_FILES+=usr/bin/s2p
OLD_FILES+=usr/bin/find2perl
OLD_FILES+=usr/sbin/pkg_update
OLD_FILES+=usr/sbin/scriptdump
# 20020409 GC kget(1), userconfig is long dead.
OLD_FILES+=sbin/kget
OLD_FILES+=usr/share/man/man8/kget.8.gz
# 200203XX
OLD_FILES+=usr/lib/libss.a
OLD_FILES+=usr/lib/libss_p.a
OLD_FILES+=usr/lib/libtelnet.a
OLD_FILES+=usr/lib/libtelnet_p.a
OLD_FILES+=usr/sbin/diskpart
# 200202XX
OLD_FILES+=usr/bin/gprof4
# 200201XX
OLD_FILES+=usr/sbin/linux
# 2001XXXX
OLD_FILES+=usr/bin/joy
OLD_FILES+=usr/sbin/ibcs2
OLD_FILES+=usr/sbin/svr4
OLD_FILES+=usr/bin/chflags
OLD_FILES+=usr/sbin/uuconv
OLD_FILES+=usr/sbin/uuchk
OLD_FILES+=usr/sbin/portmap
OLD_FILES+=usr/sbin/pmap_set
OLD_FILES+=usr/sbin/pmap_dump
OLD_FILES+=usr/sbin/mcon
OLD_FILES+=usr/sbin/stlstty
OLD_FILES+=usr/sbin/ispppcontrol
OLD_FILES+=usr/sbin/rndcontrol
# 20011001: UUCP migration to ports
OLD_FILES+=usr/bin/uucp
OLD_FILES+=usr/bin/uulog
OLD_FILES+=usr/bin/uuname
OLD_FILES+=usr/bin/uupick
OLD_FILES+=usr/bin/uusched
OLD_FILES+=usr/bin/uustat
OLD_FILES+=usr/bin/uuto
OLD_FILES+=usr/bin/uux
OLD_FILES+=usr/libexec/uucp/uucico
OLD_FILES+=usr/libexec/uucp/uuxqt
OLD_FILES+=usr/libexec/uucpd
OLD_FILES+=usr/share/man/man1/uuconv.1.gz
OLD_FILES+=usr/share/man/man1/uucp.1.gz
OLD_FILES+=usr/share/man/man1/uulog.1.gz
OLD_FILES+=usr/share/man/man1/uuname.1.gz
OLD_FILES+=usr/share/man/man1/uupick.1.gz
OLD_FILES+=usr/share/man/man1/uustat.1.gz
OLD_FILES+=usr/share/man/man1/uuto.1.gz
OLD_FILES+=usr/share/man/man1/uux.1.gz
OLD_FILES+=usr/share/man/man8/uuchk.8.gz
OLD_FILES+=usr/share/man/man8/uucico.8.gz
OLD_FILES+=usr/share/man/man8/uucpd.8.gz
OLD_FILES+=usr/share/man/man8/uusched.8.gz
OLD_FILES+=usr/share/man/man8/uuxqt.8.gz
# 20010523 mount_portal -> mount_portalfs
OLD_FILES+=sbin/mount_portal
OLD_FILES+=usr/share/man/man8/mount_portal.8.gz
# 200104XX
OLD_FILES+=usr/lib/libdescrypt.a
OLD_FILES+=usr/lib/libscrypt.a
OLD_FILES+=usr/lib/libscrypt_p.a
OLD_FILES+=usr/sbin/pim6stat
OLD_FILES+=usr/sbin/pim6sd
OLD_FILES+=usr/sbin/pim6dd
# 20010217
OLD_FILES+=usr/share/doc/bind/misc/dns-setup
# 20001200
OLD_FILES+=usr/lib/libgcc_r_pic.a
# 200009XX
OLD_FILES+=usr/lib/libRSAglue.a
OLD_FILES+=usr/lib/libRSAglue.so
OLD_FILES+=usr/lib/librsaINTL.a
OLD_FILES+=usr/lib/librsaUSA.a
OLD_FILES+=usr/lib/librsaUSA.so
# 200002XX ?
OLD_FILES+=usr/lib/libf2c.a
OLD_FILES+=usr/lib/libf2c_p.a
OLD_FILES+=usr/lib/libg++.a
OLD_FILES+=usr/lib/libg++_p.a
# 20001006
OLD_FILES+=usr/bin/miniperl
# 20000810
OLD_FILES+=usr/bin/sperl
# 200001XX
OLD_FILES+=usr/sbin/apmconf
## unsorted
# do we still support aout builds?
#OLD_FILES+=usr/lib/aout/c++rt0.o
#OLD_FILES+=usr/lib/aout/crt0.o
#OLD_FILES+=usr/lib/aout/gcrt0.o
#OLD_FILES+=usr/lib/aout/scrt0.o
#OLD_FILES+=usr/lib/aout/sgcrt0.o
OLD_FILES+=usr/lib/pam_ftp.so
OLD_FILES+=usr/share/man/man1/CA.pl.1.gz
OLD_FILES+=usr/share/man/man1/asn1parse.1.gz
OLD_FILES+=usr/share/man/man1/ca.1.gz
OLD_FILES+=usr/share/man/man1/ciphers.1.gz
OLD_FILES+=usr/share/man/man1/config.1.gz
OLD_FILES+=usr/share/man/man1/crl.1.gz
OLD_FILES+=usr/share/man/man1/crl2pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/dgst.1.gz
OLD_FILES+=usr/share/man/man1/dhparam.1.gz
OLD_FILES+=usr/share/man/man1/doscmd.1.gz
OLD_FILES+=usr/share/man/man1/dsa.1.gz
OLD_FILES+=usr/share/man/man1/dsaparam.1.gz
OLD_FILES+=usr/share/man/man1/enc.1.gz
OLD_FILES+=usr/share/man/man1/gendsa.1.gz
OLD_FILES+=usr/share/man/man1/genrsa.1.gz
OLD_FILES+=usr/share/man/man1/getNAME.1.gz
OLD_FILES+=usr/share/man/man1/nseq.1.gz
OLD_FILES+=usr/share/man/man1/ocsp.1.gz
OLD_FILES+=usr/share/man/man1/openssl.1.gz
OLD_FILES+=usr/share/man/man1/pkcs12.1.gz
OLD_FILES+=usr/share/man/man1/pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/pkcs8.1.gz
OLD_FILES+=usr/share/man/man1/rand.1.gz
OLD_FILES+=usr/share/man/man1/req.1.gz
OLD_FILES+=usr/share/man/man1/rsa.1.gz
OLD_FILES+=usr/share/man/man1/rsautl.1.gz
OLD_FILES+=usr/share/man/man1/s_client.1.gz
OLD_FILES+=usr/share/man/man1/s_server.1.gz
OLD_FILES+=usr/share/man/man1/sess_id.1.gz
OLD_FILES+=usr/share/man/man1/smime.1.gz
OLD_FILES+=usr/share/man/man1/speed.1.gz
OLD_FILES+=usr/share/man/man1/spkac.1.gz
OLD_FILES+=usr/share/man/man1/verify.1.gz
OLD_FILES+=usr/share/man/man1/version.1.gz
OLD_FILES+=usr/share/man/man1/x509.1.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_dup.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_set_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_data_into_file.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_open_tar.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_support_format_gnutar.3.gz
OLD_FILES+=usr/share/man/man3/cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_setkey.3.gz
OLD_FILES+=usr/share/man/man3/encrypt.3.gz
OLD_FILES+=usr/share/man/man3/endvfsent.3.gz
OLD_FILES+=usr/share/man/man3/getvfsbytype.3.gz
OLD_FILES+=usr/share/man/man3/getvfsent.3.gz
OLD_FILES+=usr/share/man/man3/isnanf.3.gz
OLD_FILES+=usr/share/man/man3/libautofs.3.gz
OLD_FILES+=usr/share/man/man3/pthread_attr_setsstack.3.gz
OLD_FILES+=usr/share/man/man3/pthread_getcancelstate.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/setkey.3.gz
OLD_FILES+=usr/share/man/man3/setvfsent.3.gz
OLD_FILES+=usr/share/man/man3/ssl.3.gz
OLD_FILES+=usr/share/man/man3/vfsisloadable.3.gz
OLD_FILES+=usr/share/man/man3/vfsload.3.gz
OLD_FILES+=usr/share/man/man4/als4000.4.gz
OLD_FILES+=usr/share/man/man4/csa.4.gz
OLD_FILES+=usr/share/man/man4/emu10k1.4.gz
OLD_FILES+=usr/share/man/man4/euc.4.gz
OLD_FILES+=usr/share/man/man4/gusc.4.gz
OLD_FILES+=usr/share/man/man4/if_fwp.4.gz
OLD_FILES+=usr/share/man/man4/lomac.4.gz
OLD_FILES+=usr/share/man/man4/maestro3.4.gz
OLD_FILES+=usr/share/man/man4/raid.4.gz
OLD_FILES+=usr/share/man/man4/sbc.4.gz
OLD_FILES+=usr/share/man/man4/sd.4.gz
OLD_FILES+=usr/share/man/man4/snc.4.gz
OLD_FILES+=usr/share/man/man4/st.4.gz
OLD_FILES+=usr/share/man/man4/uaudio.4.gz
OLD_FILES+=usr/share/man/man4/utf2.4.gz
OLD_FILES+=usr/share/man/man4/vinumdebug.4.gz
OLD_FILES+=usr/share/man/man5/disklabel.5.gz
OLD_FILES+=usr/share/man/man5/dm.conf.5.gz
OLD_FILES+=usr/share/man/man5/ranlib.5.gz
OLD_FILES+=usr/share/man/man5/utf2.5.gz
OLD_FILES+=usr/share/man/man7/groff_mwww.7.gz
OLD_FILES+=usr/share/man/man7/mmroff.7.gz
OLD_FILES+=usr/share/man/man7/mwww.7.gz
OLD_FILES+=usr/share/man/man8/dm.8.gz
OLD_FILES+=usr/share/man/man8/pam_ftp.8.gz
OLD_FILES+=usr/share/man/man8/pam_wheel.8.gz
OLD_FILES+=usr/share/man/man8/ssl.8.gz
OLD_FILES+=usr/share/man/man8/wlconfig.8.gz
OLD_FILES+=usr/share/man/man9/CURSIG.9.gz
OLD_FILES+=usr/share/man/man9/VFS_INIT.9.gz
OLD_FILES+=usr/share/man/man9/at_exit.9.gz
OLD_FILES+=usr/share/man/man9/at_fork.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_add.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_remove.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_empty.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_remove.9.gz
OLD_FILES+=usr/share/man/man9/endtsleep.9.gz
OLD_FILES+=usr/share/man/man9/jumbo.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_freem.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_alloc.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_free.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_steal.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_phys_to_kva.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_vm_init.9.gz
OLD_FILES+=usr/share/man/man9/mac_biba.9.gz
OLD_FILES+=usr/share/man/man9/mac_bsdextended.9.gz
OLD_FILES+=usr/share/man/man9/mono_time.9.gz
OLD_FILES+=usr/share/man/man9/p1003_1b.9.gz
OLD_FILES+=usr/share/man/man9/pmap_prefault.9.gz
OLD_FILES+=usr/share/man/man9/posix4.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_name.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_string.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_unit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_exit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_fork.9.gz
OLD_FILES+=usr/share/man/man9/runtime.9.gz
OLD_FILES+=usr/share/man/man9/sleepinit.9.gz
OLD_FILES+=usr/share/man/man9/unsleep.9.gz
OLD_FILES+=usr/share/games/atc/Game_List
OLD_FILES+=usr/share/games/atc/Killer
OLD_FILES+=usr/share/games/atc/crossover
OLD_FILES+=usr/share/games/atc/default
OLD_FILES+=usr/share/games/atc/easy
OLD_FILES+=usr/share/games/atc/game_2
OLD_FILES+=usr/share/games/larn/larnmaze
OLD_FILES+=usr/share/games/larn/larnopts
OLD_FILES+=usr/share/games/larn/larn.help
OLD_FILES+=usr/share/games/quiz.db/africa
OLD_FILES+=usr/share/games/quiz.db/america
OLD_FILES+=usr/share/games/quiz.db/areas
OLD_FILES+=usr/share/games/quiz.db/arith
OLD_FILES+=usr/share/games/quiz.db/asia
OLD_FILES+=usr/share/games/quiz.db/babies
OLD_FILES+=usr/share/games/quiz.db/bard
OLD_FILES+=usr/share/games/quiz.db/chinese
OLD_FILES+=usr/share/games/quiz.db/collectives
OLD_FILES+=usr/share/games/quiz.db/ed
OLD_FILES+=usr/share/games/quiz.db/elements
OLD_FILES+=usr/share/games/quiz.db/europe
OLD_FILES+=usr/share/games/quiz.db/flowers
OLD_FILES+=usr/share/games/quiz.db/greek
OLD_FILES+=usr/share/games/quiz.db/inca
OLD_FILES+=usr/share/games/quiz.db/index
OLD_FILES+=usr/share/games/quiz.db/latin
OLD_FILES+=usr/share/games/quiz.db/locomotive
OLD_FILES+=usr/share/games/quiz.db/midearth
OLD_FILES+=usr/share/games/quiz.db/morse
OLD_FILES+=usr/share/games/quiz.db/murders
OLD_FILES+=usr/share/games/quiz.db/poetry
OLD_FILES+=usr/share/games/quiz.db/posneg
OLD_FILES+=usr/share/games/quiz.db/pres
OLD_FILES+=usr/share/games/quiz.db/province
OLD_FILES+=usr/share/games/quiz.db/seq-easy
OLD_FILES+=usr/share/games/quiz.db/seq-hard
OLD_FILES+=usr/share/games/quiz.db/sexes
OLD_FILES+=usr/share/games/quiz.db/sov
OLD_FILES+=usr/share/games/quiz.db/spell
OLD_FILES+=usr/share/games/quiz.db/state
OLD_FILES+=usr/share/games/quiz.db/trek
OLD_FILES+=usr/share/games/quiz.db/ucc
OLD_FILES+=usr/share/games/cribbage.instr
OLD_FILES+=usr/share/games/fish.instr
OLD_FILES+=usr/share/games/wump.info
OLD_FILES+=usr/games/hide/adventure
OLD_FILES+=usr/games/hide/arithmetic
OLD_FILES+=usr/games/hide/atc
OLD_FILES+=usr/games/hide/backgammon
OLD_FILES+=usr/games/hide/teachgammon
OLD_FILES+=usr/games/hide/battlestar
OLD_FILES+=usr/games/hide/bs
OLD_FILES+=usr/games/hide/canfield
OLD_FILES+=usr/games/hide/cribbage
OLD_FILES+=usr/games/hide/fish
OLD_FILES+=usr/games/hide/hack
OLD_FILES+=usr/games/hide/hangman
OLD_FILES+=usr/games/hide/larn
OLD_FILES+=usr/games/hide/mille
OLD_FILES+=usr/games/hide/phantasia
OLD_FILES+=usr/games/hide/quiz
OLD_FILES+=usr/games/hide/robots
OLD_FILES+=usr/games/hide/rogue
OLD_FILES+=usr/games/hide/sail
OLD_FILES+=usr/games/hide/snake
OLD_FILES+=usr/games/hide/trek
OLD_FILES+=usr/games/hide/worm
OLD_FILES+=usr/games/hide/wump
OLD_FILES+=usr/games/adventure
OLD_FILES+=usr/games/arithmetic
OLD_FILES+=usr/games/atc
OLD_FILES+=usr/games/backgammon
OLD_FILES+=usr/games/teachgammon
OLD_FILES+=usr/games/battlestar
OLD_FILES+=usr/games/bs
OLD_FILES+=usr/games/canfield
OLD_FILES+=usr/games/cfscores
OLD_FILES+=usr/games/cribbage
OLD_FILES+=usr/games/dm
OLD_FILES+=usr/games/fish
OLD_FILES+=usr/games/hack
OLD_FILES+=usr/games/hangman
OLD_FILES+=usr/games/larn
OLD_FILES+=usr/games/mille
OLD_FILES+=usr/games/phantasia
OLD_FILES+=usr/games/piano
OLD_FILES+=usr/games/pig
OLD_FILES+=usr/games/quiz
OLD_FILES+=usr/games/rain
OLD_FILES+=usr/games/robots
OLD_FILES+=usr/games/rogue
OLD_FILES+=usr/games/sail
OLD_FILES+=usr/games/snake
OLD_FILES+=usr/games/snscore
OLD_FILES+=usr/games/trek
OLD_FILES+=usr/games/wargames
OLD_FILES+=usr/games/worm
OLD_FILES+=usr/games/worms
OLD_FILES+=usr/games/wump
OLD_FILES+=sbin/mount_reiserfs
OLD_FILES+=usr/include/cam/cam_extend.h
OLD_FILES+=usr/include/dev/wi/wi_hostap.h
OLD_FILES+=usr/include/disktab.h
OLD_FILES+=usr/include/g++/FlexLexer.h
OLD_FILES+=usr/include/g++/PlotFile.h
OLD_FILES+=usr/include/g++/SFile.h
OLD_FILES+=usr/include/g++/_G_config.h
OLD_FILES+=usr/include/g++/algo.h
OLD_FILES+=usr/include/g++/algobase.h
OLD_FILES+=usr/include/g++/algorithm
OLD_FILES+=usr/include/g++/alloc.h
OLD_FILES+=usr/include/g++/bitset
OLD_FILES+=usr/include/g++/builtinbuf.h
OLD_FILES+=usr/include/g++/bvector.h
OLD_FILES+=usr/include/g++/cassert
OLD_FILES+=usr/include/g++/cctype
OLD_FILES+=usr/include/g++/cerrno
OLD_FILES+=usr/include/g++/cfloat
OLD_FILES+=usr/include/g++/ciso646
OLD_FILES+=usr/include/g++/climits
OLD_FILES+=usr/include/g++/clocale
OLD_FILES+=usr/include/g++/cmath
OLD_FILES+=usr/include/g++/complex
OLD_FILES+=usr/include/g++/complex.h
OLD_FILES+=usr/include/g++/csetjmp
OLD_FILES+=usr/include/g++/csignal
OLD_FILES+=usr/include/g++/cstdarg
OLD_FILES+=usr/include/g++/cstddef
OLD_FILES+=usr/include/g++/cstdio
OLD_FILES+=usr/include/g++/cstdlib
OLD_FILES+=usr/include/g++/cstring
OLD_FILES+=usr/include/g++/ctime
OLD_FILES+=usr/include/g++/cwchar
OLD_FILES+=usr/include/g++/cwctype
OLD_FILES+=usr/include/g++/defalloc.h
OLD_FILES+=usr/include/g++/deque
OLD_FILES+=usr/include/g++/deque.h
OLD_FILES+=usr/include/g++/editbuf.h
OLD_FILES+=usr/include/g++/exception
OLD_FILES+=usr/include/g++/floatio.h
OLD_FILES+=usr/include/g++/fstream
OLD_FILES+=usr/include/g++/fstream.h
OLD_FILES+=usr/include/g++/function.h
OLD_FILES+=usr/include/g++/functional
OLD_FILES+=usr/include/g++/hash_map
OLD_FILES+=usr/include/g++/hash_map.h
OLD_FILES+=usr/include/g++/hash_set
OLD_FILES+=usr/include/g++/hash_set.h
OLD_FILES+=usr/include/g++/hashtable.h
OLD_FILES+=usr/include/g++/heap.h
OLD_FILES+=usr/include/g++/indstream.h
OLD_FILES+=usr/include/g++/iolibio.h
OLD_FILES+=usr/include/g++/iomanip
OLD_FILES+=usr/include/g++/iomanip.h
OLD_FILES+=usr/include/g++/iosfwd
OLD_FILES+=usr/include/g++/iostdio.h
OLD_FILES+=usr/include/g++/iostream
OLD_FILES+=usr/include/g++/iostream.h
OLD_FILES+=usr/include/g++/iostreamP.h
OLD_FILES+=usr/include/g++/istream.h
OLD_FILES+=usr/include/g++/iterator
OLD_FILES+=usr/include/g++/iterator.h
OLD_FILES+=usr/include/g++/libio.h
OLD_FILES+=usr/include/g++/libioP.h
OLD_FILES+=usr/include/g++/list
OLD_FILES+=usr/include/g++/list.h
OLD_FILES+=usr/include/g++/map
OLD_FILES+=usr/include/g++/map.h
OLD_FILES+=usr/include/g++/memory
OLD_FILES+=usr/include/g++/multimap.h
OLD_FILES+=usr/include/g++/multiset.h
OLD_FILES+=usr/include/g++/new
OLD_FILES+=usr/include/g++/new.h
OLD_FILES+=usr/include/g++/numeric
OLD_FILES+=usr/include/g++/ostream.h
OLD_FILES+=usr/include/g++/pair.h
OLD_FILES+=usr/include/g++/parsestream.h
OLD_FILES+=usr/include/g++/pfstream.h
OLD_FILES+=usr/include/g++/procbuf.h
OLD_FILES+=usr/include/g++/pthread_alloc
OLD_FILES+=usr/include/g++/pthread_alloc.h
OLD_FILES+=usr/include/g++/queue
OLD_FILES+=usr/include/g++/rope
OLD_FILES+=usr/include/g++/rope.h
OLD_FILES+=usr/include/g++/ropeimpl.h
OLD_FILES+=usr/include/g++/set
OLD_FILES+=usr/include/g++/set.h
OLD_FILES+=usr/include/g++/slist
OLD_FILES+=usr/include/g++/slist.h
OLD_FILES+=usr/include/g++/sstream
OLD_FILES+=usr/include/g++/stack
OLD_FILES+=usr/include/g++/stack.h
OLD_FILES+=usr/include/g++/std/bastring.cc
OLD_FILES+=usr/include/g++/std/bastring.h
OLD_FILES+=usr/include/g++/std/complext.cc
OLD_FILES+=usr/include/g++/std/complext.h
OLD_FILES+=usr/include/g++/std/dcomplex.h
OLD_FILES+=usr/include/g++/std/fcomplex.h
OLD_FILES+=usr/include/g++/std/gslice.h
OLD_FILES+=usr/include/g++/std/gslice_array.h
OLD_FILES+=usr/include/g++/std/indirect_array.h
OLD_FILES+=usr/include/g++/std/ldcomplex.h
OLD_FILES+=usr/include/g++/std/mask_array.h
OLD_FILES+=usr/include/g++/std/slice.h
OLD_FILES+=usr/include/g++/std/slice_array.h
OLD_FILES+=usr/include/g++/std/std_valarray.h
OLD_FILES+=usr/include/g++/std/straits.h
OLD_FILES+=usr/include/g++/std/valarray_array.h
OLD_FILES+=usr/include/g++/std/valarray_array.tcc
OLD_FILES+=usr/include/g++/std/valarray_meta.h
OLD_FILES+=usr/include/g++/stdexcept
OLD_FILES+=usr/include/g++/stdiostream.h
OLD_FILES+=usr/include/g++/stl.h
OLD_FILES+=usr/include/g++/stl_algo.h
OLD_FILES+=usr/include/g++/stl_algobase.h
OLD_FILES+=usr/include/g++/stl_alloc.h
OLD_FILES+=usr/include/g++/stl_bvector.h
OLD_FILES+=usr/include/g++/stl_config.h
OLD_FILES+=usr/include/g++/stl_construct.h
OLD_FILES+=usr/include/g++/stl_deque.h
OLD_FILES+=usr/include/g++/stl_function.h
OLD_FILES+=usr/include/g++/stl_hash_fun.h
OLD_FILES+=usr/include/g++/stl_hash_map.h
OLD_FILES+=usr/include/g++/stl_hash_set.h
OLD_FILES+=usr/include/g++/stl_hashtable.h
OLD_FILES+=usr/include/g++/stl_heap.h
OLD_FILES+=usr/include/g++/stl_iterator.h
OLD_FILES+=usr/include/g++/stl_list.h
OLD_FILES+=usr/include/g++/stl_map.h
OLD_FILES+=usr/include/g++/stl_multimap.h
OLD_FILES+=usr/include/g++/stl_multiset.h
OLD_FILES+=usr/include/g++/stl_numeric.h
OLD_FILES+=usr/include/g++/stl_pair.h
OLD_FILES+=usr/include/g++/stl_queue.h
OLD_FILES+=usr/include/g++/stl_raw_storage_iter.h
OLD_FILES+=usr/include/g++/stl_relops.h
OLD_FILES+=usr/include/g++/stl_rope.h
OLD_FILES+=usr/include/g++/stl_set.h
OLD_FILES+=usr/include/g++/stl_slist.h
OLD_FILES+=usr/include/g++/stl_stack.h
OLD_FILES+=usr/include/g++/stl_tempbuf.h
OLD_FILES+=usr/include/g++/stl_tree.h
OLD_FILES+=usr/include/g++/stl_uninitialized.h
OLD_FILES+=usr/include/g++/stl_vector.h
OLD_FILES+=usr/include/g++/stream.h
OLD_FILES+=usr/include/g++/streambuf.h
OLD_FILES+=usr/include/g++/strfile.h
OLD_FILES+=usr/include/g++/string
OLD_FILES+=usr/include/g++/strstream
OLD_FILES+=usr/include/g++/strstream.h
OLD_FILES+=usr/include/g++/tempbuf.h
OLD_FILES+=usr/include/g++/tree.h
OLD_FILES+=usr/include/g++/type_traits.h
OLD_FILES+=usr/include/g++/typeinfo
OLD_FILES+=usr/include/g++/utility
OLD_FILES+=usr/include/g++/valarray
OLD_FILES+=usr/include/g++/vector
OLD_FILES+=usr/include/g++/vector.h
OLD_FILES+=usr/include/gmp.h
OLD_FILES+=usr/include/isc/assertions.h
OLD_FILES+=usr/include/isc/ctl.h
OLD_FILES+=usr/include/isc/dst.h
OLD_FILES+=usr/include/isc/eventlib.h
OLD_FILES+=usr/include/isc/heap.h
OLD_FILES+=usr/include/isc/irpmarshall.h
OLD_FILES+=usr/include/isc/list.h
OLD_FILES+=usr/include/isc/logging.h
OLD_FILES+=usr/include/isc/memcluster.h
OLD_FILES+=usr/include/isc/misc.h
OLD_FILES+=usr/include/isc/tree.h
OLD_FILES+=usr/include/machine/ansi.h
OLD_FILES+=usr/include/machine/apic.h
OLD_FILES+=usr/include/machine/asc_ioctl.h
OLD_FILES+=usr/include/machine/asnames.h
OLD_FILES+=usr/include/machine/bus_at386.h
OLD_FILES+=usr/include/machine/bus_memio.h
OLD_FILES+=usr/include/machine/bus_pc98.h
OLD_FILES+=usr/include/machine/bus_pio.h
OLD_FILES+=usr/include/machine/cdk.h
OLD_FILES+=usr/include/machine/comstats.h
OLD_FILES+=usr/include/machine/console.h
OLD_FILES+=usr/include/machine/critical.h
OLD_FILES+=usr/include/machine/cronyx.h
OLD_FILES+=usr/include/machine/dvcfg.h
OLD_FILES+=usr/include/machine/globaldata.h
OLD_FILES+=usr/include/machine/globals.h
OLD_FILES+=usr/include/machine/gsc.h
OLD_FILES+=usr/include/machine/i4b_isppp.h
OLD_FILES+=usr/include/machine/if_wavelan_ieee.h
OLD_FILES+=usr/include/machine/iic.h
OLD_FILES+=usr/include/machine/ioctl_ctx.h
OLD_FILES+=usr/include/machine/ioctl_fd.h
OLD_FILES+=usr/include/machine/ipl.h
OLD_FILES+=usr/include/machine/lock.h
OLD_FILES+=usr/include/machine/mouse.h
OLD_FILES+=usr/include/machine/mpapic.h
OLD_FILES+=usr/include/machine/mtpr.h
OLD_FILES+=usr/include/machine/pc/msdos.h
OLD_FILES+=usr/include/machine/physio_proc.h
OLD_FILES+=usr/include/machine/smb.h
OLD_FILES+=usr/include/machine/spigot.h
OLD_FILES+=usr/include/machine/types.h
OLD_FILES+=usr/include/machine/uc_device.h
OLD_FILES+=usr/include/machine/ultrasound.h
OLD_FILES+=usr/include/machine/wtio.h
OLD_FILES+=usr/include/msdosfs/bootsect.h
OLD_FILES+=usr/include/msdosfs/bpb.h
OLD_FILES+=usr/include/msdosfs/denode.h
OLD_FILES+=usr/include/msdosfs/direntry.h
OLD_FILES+=usr/include/msdosfs/fat.h
OLD_FILES+=usr/include/msdosfs/msdosfsmount.h
OLD_FILES+=usr/include/net/hostcache.h
OLD_FILES+=usr/include/net/if_faith.h
OLD_FILES+=usr/include/net/if_ieee80211.h
OLD_FILES+=usr/include/net/if_tunvar.h
OLD_FILES+=usr/include/net/intrq.h
OLD_FILES+=usr/include/netatm/kern_include.h
OLD_FILES+=usr/include/netinet/if_fddi.h
OLD_FILES+=usr/include/netinet/in_hostcache.h
OLD_FILES+=usr/include/netinet/ip_flow.h
OLD_FILES+=usr/include/netinet/ip_fw2.h
OLD_FILES+=usr/include/netinet6/in6_prefix.h
OLD_FILES+=usr/include/netns/idp.h
OLD_FILES+=usr/include/netns/idp_var.h
OLD_FILES+=usr/include/netns/ns.h
OLD_FILES+=usr/include/netns/ns_error.h
OLD_FILES+=usr/include/netns/ns_if.h
OLD_FILES+=usr/include/netns/ns_pcb.h
OLD_FILES+=usr/include/netns/sp.h
OLD_FILES+=usr/include/netns/spidp.h
OLD_FILES+=usr/include/netns/spp_debug.h
OLD_FILES+=usr/include/netns/spp_timer.h
OLD_FILES+=usr/include/netns/spp_var.h
OLD_FILES+=usr/include/nfs/nfs.h
OLD_FILES+=usr/include/nfs/nfsm_subs.h
OLD_FILES+=usr/include/nfs/nfsmount.h
OLD_FILES+=usr/include/nfs/nfsnode.h
OLD_FILES+=usr/include/nfs/nfsrtt.h
OLD_FILES+=usr/include/nfs/nfsrvcache.h
OLD_FILES+=usr/include/nfs/nfsv2.h
OLD_FILES+=usr/include/nfs/nqnfs.h
OLD_FILES+=usr/include/ntfs/ntfs.h
OLD_FILES+=usr/include/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/ntfs/ntfsmount.h
OLD_FILES+=usr/include/nwfs/nwfs.h
OLD_FILES+=usr/include/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/nwfs/nwfs_node.h
OLD_FILES+=usr/include/nwfs/nwfs_subr.h
OLD_FILES+=usr/include/posix4/_semaphore.h
OLD_FILES+=usr/include/posix4/aio.h
OLD_FILES+=usr/include/posix4/ksem.h
OLD_FILES+=usr/include/posix4/mqueue.h
OLD_FILES+=usr/include/posix4/posix4.h
OLD_FILES+=usr/include/posix4/sched.h
OLD_FILES+=usr/include/posix4/semaphore.h
OLD_DIRS+=usr/include/posix4
OLD_FILES+=usr/include/security/_pam_compat.h
OLD_FILES+=usr/include/security/_pam_macros.h
OLD_FILES+=usr/include/security/_pam_types.h
OLD_FILES+=usr/include/security/pam_malloc.h
OLD_FILES+=usr/include/security/pam_misc.h
OLD_FILES+=usr/include/skey.h
OLD_FILES+=usr/include/strhash.h
OLD_FILES+=usr/include/struct.h
OLD_FILES+=usr/include/sys/_label.h
OLD_FILES+=usr/include/sys/_posix.h
OLD_FILES+=usr/include/sys/bus_private.h
OLD_FILES+=usr/include/sys/ccdvar.h
OLD_FILES+=usr/include/sys/diskslice.h
OLD_FILES+=usr/include/sys/dmap.h
OLD_FILES+=usr/include/sys/inttypes.h
OLD_FILES+=usr/include/sys/jumbo.h
OLD_FILES+=usr/include/sys/mac_policy.h
OLD_FILES+=usr/include/sys/pbioio.h
OLD_FILES+=usr/include/sys/syscall-hide.h
OLD_FILES+=usr/include/sys/tprintf.h
OLD_FILES+=usr/include/sys/vnioctl.h
OLD_FILES+=usr/include/sys/wormio.h
OLD_FILES+=usr/include/telnet.h
OLD_FILES+=usr/include/ufs/mfs/mfs_extern.h
OLD_FILES+=usr/include/ufs/mfs/mfsnode.h
OLD_FILES+=usr/include/values.h
OLD_FILES+=usr/include/vm/vm_zone.h
OLD_FILES+=usr/share/examples/etc/usbd.conf
OLD_FILES+=usr/share/examples/meteor/README
OLD_FILES+=usr/share/examples/meteor/rgb16.c
OLD_FILES+=usr/share/examples/meteor/rgb24.c
OLD_FILES+=usr/share/examples/meteor/test-n.c
OLD_FILES+=usr/share/examples/meteor/yuvpk.c
OLD_FILES+=usr/share/examples/meteor/yuvpl.c
OLD_FILES+=usr/share/examples/worm/README
OLD_FILES+=usr/share/examples/worm/makecdfs.sh
OLD_FILES+=usr/share/groff_font/devlj4/Makefile
OLD_FILES+=usr/share/groff_font/devlj4/text.map
OLD_FILES+=usr/share/groff_font/devlj4/special.map
OLD_FILES+=usr/share/misc/nslookup.help
OLD_FILES+=usr/share/sendmail/cf/feature/nodns.m4
OLD_FILES+=usr/share/syscons/keymaps/lat-amer.kbd
OLD_FILES+=usr/share/vi/catalog/ru_SU.KOI8-R
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5EDT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6CDT
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7MDT
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8PDT
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9YDT
OLD_FILES+=usr/share/zoneinfo/SystemV/HST10
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4ADT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4
OLD_DIRS+=usr/share/zoneinfo/SystemV
OLD_FILES+=usr/share/doc/ntp/accopt.htm
OLD_FILES+=usr/share/doc/ntp/assoc.htm
OLD_FILES+=usr/share/doc/ntp/audio.htm
OLD_FILES+=usr/share/doc/ntp/authopt.htm
OLD_FILES+=usr/share/doc/ntp/biblio.htm
OLD_FILES+=usr/share/doc/ntp/build.htm
OLD_FILES+=usr/share/doc/ntp/clockopt.htm
OLD_FILES+=usr/share/doc/ntp/config.htm
OLD_FILES+=usr/share/doc/ntp/confopt.htm
OLD_FILES+=usr/share/doc/ntp/copyright.htm
OLD_FILES+=usr/share/doc/ntp/debug.htm
OLD_FILES+=usr/share/doc/ntp/driver1.htm
OLD_FILES+=usr/share/doc/ntp/driver10.htm
OLD_FILES+=usr/share/doc/ntp/driver11.htm
OLD_FILES+=usr/share/doc/ntp/driver12.htm
OLD_FILES+=usr/share/doc/ntp/driver16.htm
OLD_FILES+=usr/share/doc/ntp/driver18.htm
OLD_FILES+=usr/share/doc/ntp/driver19.htm
OLD_FILES+=usr/share/doc/ntp/driver2.htm
OLD_FILES+=usr/share/doc/ntp/driver20.htm
OLD_FILES+=usr/share/doc/ntp/driver22.htm
OLD_FILES+=usr/share/doc/ntp/driver23.htm
OLD_FILES+=usr/share/doc/ntp/driver24.htm
OLD_FILES+=usr/share/doc/ntp/driver26.htm
OLD_FILES+=usr/share/doc/ntp/driver27.htm
OLD_FILES+=usr/share/doc/ntp/driver28.htm
OLD_FILES+=usr/share/doc/ntp/driver29.htm
OLD_FILES+=usr/share/doc/ntp/driver3.htm
OLD_FILES+=usr/share/doc/ntp/driver30.htm
OLD_FILES+=usr/share/doc/ntp/driver32.htm
OLD_FILES+=usr/share/doc/ntp/driver33.htm
OLD_FILES+=usr/share/doc/ntp/driver34.htm
OLD_FILES+=usr/share/doc/ntp/driver35.htm
OLD_FILES+=usr/share/doc/ntp/driver36.htm
OLD_FILES+=usr/share/doc/ntp/driver37.htm
OLD_FILES+=usr/share/doc/ntp/driver4.htm
OLD_FILES+=usr/share/doc/ntp/driver5.htm
OLD_FILES+=usr/share/doc/ntp/driver6.htm
OLD_FILES+=usr/share/doc/ntp/driver7.htm
OLD_FILES+=usr/share/doc/ntp/driver8.htm
OLD_FILES+=usr/share/doc/ntp/driver9.htm
OLD_FILES+=usr/share/doc/ntp/exec.htm
OLD_FILES+=usr/share/doc/ntp/extern.htm
OLD_FILES+=usr/share/doc/ntp/gadget.htm
OLD_FILES+=usr/share/doc/ntp/hints.htm
OLD_FILES+=usr/share/doc/ntp/howto.htm
OLD_FILES+=usr/share/doc/ntp/htmlprimer.htm
OLD_FILES+=usr/share/doc/ntp/index.htm
OLD_FILES+=usr/share/doc/ntp/kern.htm
OLD_FILES+=usr/share/doc/ntp/kernpps.htm
OLD_FILES+=usr/share/doc/ntp/ldisc.htm
OLD_FILES+=usr/share/doc/ntp/measure.htm
OLD_FILES+=usr/share/doc/ntp/miscopt.htm
OLD_FILES+=usr/share/doc/ntp/monopt.htm
OLD_FILES+=usr/share/doc/ntp/mx4200data.htm
OLD_FILES+=usr/share/doc/ntp/notes.htm
OLD_FILES+=usr/share/doc/ntp/ntpd.htm
OLD_FILES+=usr/share/doc/ntp/ntpdate.htm
OLD_FILES+=usr/share/doc/ntp/ntpdc.htm
OLD_FILES+=usr/share/doc/ntp/ntpq.htm
OLD_FILES+=usr/share/doc/ntp/ntptime.htm
OLD_FILES+=usr/share/doc/ntp/ntptrace.htm
OLD_FILES+=usr/share/doc/ntp/parsedata.htm
OLD_FILES+=usr/share/doc/ntp/parsenew.htm
OLD_FILES+=usr/share/doc/ntp/patches.htm
OLD_FILES+=usr/share/doc/ntp/porting.htm
OLD_FILES+=usr/share/doc/ntp/pps.htm
OLD_FILES+=usr/share/doc/ntp/prefer.htm
OLD_FILES+=usr/share/doc/ntp/qth.htm
OLD_FILES+=usr/share/doc/ntp/quick.htm
OLD_FILES+=usr/share/doc/ntp/rdebug.htm
OLD_FILES+=usr/share/doc/ntp/refclock.htm
OLD_FILES+=usr/share/doc/ntp/release.htm
OLD_FILES+=usr/share/doc/ntp/tickadj.htm
OLD_FILES+=usr/share/doc/papers/nqnfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/px.ascii.gz
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_compat_des3_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_copy_ccache.3.gz
OLD_FILES+=usr/share/man/man3/mac_is_present_np.3.gz
OLD_FILES+=usr/share/man/man3/mbmb.3.gz
OLD_FILES+=usr/share/man/man3/setrunelocale.3.gz
OLD_FILES+=usr/share/man/man5/usbd.conf.5.gz
.if ${TARGET_ARCH} != "i386" && ${TARGET_ARCH} != "amd64"
OLD_FILES+=usr/share/man/man8/boot_i386.8.gz
.endif
.if ${TARGET_ARCH} != "aarch64" && ${TARGET} != "arm" && \
${TARGET_ARCH} != "powerpc" && ${TARGET_ARCH} != "powerpc64" && \
${TARGET_ARCH} != "sparc64" && ${TARGET} != "mips"
OLD_FILES+=usr/share/man/man8/ofwdump.8.gz
.endif
OLD_FILES+=usr/share/man/man8/mount_reiserfs.8.gz
OLD_FILES+=usr/share/man/man9/VFS_START.9.gz
OLD_FILES+=usr/share/man/man9/cpu_critical_exit.9.gz
OLD_FILES+=usr/share/man/man9/cpu_critical_enter.9.gz
OLD_FILES+=usr/share/info/annotate.info.gz
OLD_FILES+=usr/share/info/tar.info.gz
OLD_FILES+=usr/share/bsnmp/defs/tree.def
OLD_FILES+=usr/share/bsnmp/defs/mibII_tree.def
OLD_FILES+=usr/share/bsnmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/bsnmp/mibs/FOKUS-MIB.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-MIB.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-SNMPD.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-NETGRAPH.txt
OLD_FILES+=usr/libdata/msdosfs/iso22dos
OLD_FILES+=usr/libdata/msdosfs/iso72dos
OLD_FILES+=usr/libdata/msdosfs/koi2dos
OLD_FILES+=usr/libdata/msdosfs/koi8u2dos
# The following files are *not* obsolete; they just don't get touched at
# install, so don't add them:
# - boot/loader.rc
# - usr/share/tmac/man.local
# - usr/share/tmac/mm/locale
# - usr/share/tmac/mm/se_locale
# - var/yp/Makefile
# Early entries split OLD_FILES, OLD_LIBS, and OLD_DIRS into separate sections
# in this file, but this practice was abandoned in the mid-2000s.
#
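# A minimal sketch of the current convention (date and paths hypothetical):
# entries go under a dated comment, with OLD_FILES for plain files,
# OLD_LIBS for versioned shared libraries, and OLD_DIRS for directories
# left empty by the removals, e.g.:
#   # 20240101: example utility retired
#   OLD_FILES+=usr/bin/example
#   OLD_LIBS+=usr/lib/libexample.so.1
#   OLD_DIRS+=usr/share/examples/example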
# 20071120: shared library version bump
OLD_LIBS+=usr/lib/libasn1.so.8
OLD_LIBS+=usr/lib/libgssapi.so.8
OLD_LIBS+=usr/lib/libgssapi_krb5.so.8
OLD_LIBS+=usr/lib/libhdb.so.8
OLD_LIBS+=usr/lib/libkadm5clnt.so.8
OLD_LIBS+=usr/lib/libkadm5srv.so.8
OLD_LIBS+=usr/lib/libkafs5.so.8
OLD_LIBS+=usr/lib/libkrb5.so.8
OLD_LIBS+=usr/lib/libobjc.so.2
# 20070519: GCC 4.2
OLD_FILES+=usr/lib/libg2c.a
OLD_FILES+=usr/lib/libg2c.so
OLD_LIBS+=usr/lib/libg2c.so.2
OLD_FILES+=usr/lib/libg2c_p.a
OLD_FILES+=usr/lib/libgcc_pic.a
# 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade
OLD_LIBS+=lib/libcrypto.so.4
OLD_LIBS+=usr/lib/libssl.so.4
# 20060521: gethostbyaddr(3) ABI change
OLD_LIBS+=usr/lib/libroken.so.8
OLD_LIBS+=lib/libatm.so.3
OLD_LIBS+=lib/libc.so.6
OLD_LIBS+=lib/libutil.so.5
# 20060413: shared library moved to /usr/lib
MOVED_LIBS+=lib/libgpib.so.1
# 20060413: libpcap.so.4 moved to /lib/
MOVED_LIBS+=usr/lib/libpcap.so.4
# 20060412: libpthread.so.2 moved to /lib/
MOVED_LIBS+=usr/lib/libpthread.so.2
# 20060127: revert libdisk to static-only
OLD_LIBS+=usr/lib/libdisk.so.3
# 20051027: libc_r discontinued (removed 20101113)
OLD_FILES+=usr/lib/libc_r.a
OLD_FILES+=usr/lib/libc_r.so
OLD_LIBS+=usr/lib/libc_r.so.7
OLD_FILES+=usr/lib/libc_r_p.a
# 20050722: bump for 6.0-RELEASE
OLD_LIBS+=lib/libalias.so.4
OLD_LIBS+=lib/libatm.so.2
OLD_LIBS+=lib/libbegemot.so.1
OLD_LIBS+=lib/libbsdxml.so.1
OLD_LIBS+=lib/libbsnmp.so.2
OLD_LIBS+=lib/libc.so.5
OLD_LIBS+=lib/libcam.so.2
OLD_LIBS+=lib/libcrypt.so.2
OLD_LIBS+=lib/libcrypto.so.3
OLD_LIBS+=lib/libdevstat.so.4
OLD_LIBS+=lib/libedit.so.4
OLD_LIBS+=lib/libgeom.so.2
OLD_LIBS+=lib/libgpib.so.0
OLD_LIBS+=lib/libipsec.so.1
OLD_LIBS+=lib/libipx.so.2
OLD_LIBS+=lib/libkiconv.so.1
OLD_LIBS+=lib/libkvm.so.2
OLD_LIBS+=lib/libm.so.3
OLD_LIBS+=lib/libmd.so.2
OLD_LIBS+=lib/libncurses.so.5
OLD_LIBS+=lib/libreadline.so.5
OLD_LIBS+=lib/libsbuf.so.2
OLD_LIBS+=lib/libufs.so.2
OLD_LIBS+=lib/libutil.so.4
OLD_LIBS+=lib/libz.so.2
OLD_LIBS+=usr/lib/libarchive.so.1
OLD_LIBS+=usr/lib/libasn1.so.7
OLD_LIBS+=usr/lib/libbluetooth.so.1
OLD_LIBS+=usr/lib/libbz2.so.1
OLD_LIBS+=usr/lib/libc_r.so.5
OLD_LIBS+=usr/lib/libcalendar.so.2
OLD_LIBS+=usr/lib/libcom_err.so.2
OLD_LIBS+=usr/lib/libdevinfo.so.2
OLD_LIBS+=usr/lib/libdialog.so.4
OLD_LIBS+=usr/lib/libfetch.so.3
OLD_LIBS+=usr/lib/libform.so.2
OLD_LIBS+=usr/lib/libftpio.so.5
OLD_LIBS+=usr/lib/libg2c.so.1
OLD_LIBS+=usr/lib/libgnuregex.so.2
OLD_LIBS+=usr/lib/libgssapi.so.7
OLD_LIBS+=usr/lib/libhdb.so.7
OLD_LIBS+=usr/lib/libhistory.so.5
OLD_LIBS+=usr/lib/libkadm5clnt.so.7
OLD_LIBS+=usr/lib/libkadm5srv.so.7
OLD_LIBS+=usr/lib/libkafs5.so.7
OLD_LIBS+=usr/lib/libkrb5.so.7
OLD_LIBS+=usr/lib/libmagic.so.1
OLD_LIBS+=usr/lib/libmenu.so.2
OLD_LIBS+=usr/lib/libmilter.so.2
OLD_LIBS+=usr/lib/libmp.so.4
OLD_LIBS+=usr/lib/libncp.so.1
OLD_LIBS+=usr/lib/libnetgraph.so.1
OLD_LIBS+=usr/lib/libngatm.so.1
OLD_LIBS+=usr/lib/libobjc.so.1
OLD_LIBS+=usr/lib/libopie.so.3
OLD_LIBS+=usr/lib/libpam.so.2
OLD_LIBS+=usr/lib/libpanel.so.2
OLD_LIBS+=usr/lib/libpcap.so.3
OLD_LIBS+=usr/lib/libpmc.so.2
OLD_LIBS+=usr/lib/libpthread.so.1
OLD_LIBS+=usr/lib/libradius.so.1
OLD_LIBS+=usr/lib/libroken.so.7
OLD_LIBS+=usr/lib/librpcsvc.so.2
OLD_LIBS+=usr/lib/libsdp.so.1
OLD_LIBS+=usr/lib/libsmb.so.1
OLD_LIBS+=usr/lib/libssh.so.2
OLD_LIBS+=usr/lib/libssl.so.3
OLD_LIBS+=usr/lib/libstdc++.so.4
OLD_LIBS+=usr/lib/libtacplus.so.1
OLD_LIBS+=usr/lib/libthr.so.1
OLD_LIBS+=usr/lib/libthread_db.so.1
OLD_LIBS+=usr/lib/libugidfw.so.1
OLD_LIBS+=usr/lib/libusbhid.so.1
OLD_LIBS+=usr/lib/libvgl.so.3
OLD_LIBS+=usr/lib/libwrap.so.3
OLD_LIBS+=usr/lib/libypclnt.so.1
OLD_LIBS+=usr/lib/pam_chroot.so.2
OLD_LIBS+=usr/lib/pam_deny.so.2
OLD_LIBS+=usr/lib/pam_echo.so.2
OLD_LIBS+=usr/lib/pam_exec.so.2
OLD_LIBS+=usr/lib/pam_ftpusers.so.2
OLD_LIBS+=usr/lib/pam_group.so.2
OLD_LIBS+=usr/lib/pam_guest.so.2
OLD_LIBS+=usr/lib/pam_krb5.so.2
OLD_LIBS+=usr/lib/pam_ksu.so.2
OLD_LIBS+=usr/lib/pam_lastlog.so.2
OLD_LIBS+=usr/lib/pam_login_access.so.2
OLD_LIBS+=usr/lib/pam_nologin.so.2
OLD_LIBS+=usr/lib/pam_opie.so.2
OLD_LIBS+=usr/lib/pam_opieaccess.so.2
OLD_LIBS+=usr/lib/pam_passwdqc.so.2
OLD_LIBS+=usr/lib/pam_permit.so.2
OLD_LIBS+=usr/lib/pam_radius.so.2
OLD_LIBS+=usr/lib/pam_rhosts.so.2
OLD_LIBS+=usr/lib/pam_rootok.so.2
OLD_LIBS+=usr/lib/pam_securetty.so.2
OLD_LIBS+=usr/lib/pam_self.so.2
OLD_LIBS+=usr/lib/pam_ssh.so.2
OLD_LIBS+=usr/lib/pam_tacplus.so.2
OLD_LIBS+=usr/lib/pam_unix.so.2
OLD_LIBS+=usr/lib/snmp_atm.so.3
OLD_LIBS+=usr/lib/snmp_mibII.so.3
OLD_LIBS+=usr/lib/snmp_netgraph.so.3
OLD_LIBS+=usr/lib/snmp_pf.so.3
# 200505XX: ?
OLD_LIBS+=usr/lib/snmp_atm.so.2
OLD_LIBS+=usr/lib/snmp_mibII.so.2
OLD_LIBS+=usr/lib/snmp_netgraph.so.2
OLD_LIBS+=usr/lib/snmp_pf.so.2
# 2005XXXX: not ready for primetime yet
OLD_LIBS+=usr/lib/libautofs.so.1
# 200411XX: libxpg4 removal
OLD_LIBS+=usr/lib/libxpg4.so.3
# 200410XX: libm compatibility fix
OLD_LIBS+=lib/libm.so.2
# 20041001: version bump
OLD_LIBS+=lib/libreadline.so.4
OLD_LIBS+=usr/lib/libhistory.so.4
OLD_LIBS+=usr/lib/libopie.so.2
OLD_LIBS+=usr/lib/libpcap.so.2
# 20040925: bind9 import
OLD_LIBS+=usr/lib/libisc.so.1
# 200408XX
OLD_LIBS+=usr/lib/snmp_netgraph.so.1
# 200404XX
OLD_LIBS+=usr/lib/libsnmp.so.1
OLD_LIBS+=usr/lib/snmp_mibII.so.1
# 200309XX
OLD_LIBS+=usr/lib/libasn1.so.6
OLD_LIBS+=usr/lib/libhdb.so.6
OLD_LIBS+=usr/lib/libkadm5clnt.so.6
OLD_LIBS+=usr/lib/libkadm5srv.so.6
OLD_LIBS+=usr/lib/libkrb5.so.6
OLD_LIBS+=usr/lib/libroken.so.6
# 200304XX
OLD_LIBS+=usr/lib/libc.so.4
OLD_LIBS+=usr/lib/libc_r.so.4
OLD_LIBS+=usr/lib/libdevstat.so.2
OLD_LIBS+=usr/lib/libedit.so.3
OLD_LIBS+=usr/lib/libgmp.so.3
OLD_LIBS+=usr/lib/libmp.so.3
OLD_LIBS+=usr/lib/libpam.so.1
OLD_LIBS+=usr/lib/libposix1e.so.2
OLD_LIBS+=usr/lib/libskey.so.2
OLD_LIBS+=usr/lib/libusbhid.so.0
OLD_LIBS+=usr/lib/libvgl.so.2
# 20030218: OpenSSL 0.9.7 import
OLD_FILES+=usr/include/des.h
OLD_FILES+=usr/lib/libdes.a
OLD_FILES+=usr/lib/libdes.so
OLD_LIBS+=usr/lib/libdes.so.3
OLD_FILES+=usr/lib/libdes_p.a
# 200302XX
OLD_LIBS+=usr/lib/libacl.so.3
OLD_LIBS+=usr/lib/libasn1.so.5
OLD_LIBS+=usr/lib/libcrypto.so.2
OLD_LIBS+=usr/lib/libgssapi.so.5
OLD_LIBS+=usr/lib/libhdb.so.5
OLD_LIBS+=usr/lib/libkadm.so.3
OLD_LIBS+=usr/lib/libkadm5clnt.so.5
OLD_LIBS+=usr/lib/libkadm5srv.so.5
OLD_LIBS+=usr/lib/libkafs.so.3
OLD_LIBS+=usr/lib/libkafs5.so.5
OLD_LIBS+=usr/lib/libkdb.so.3
OLD_LIBS+=usr/lib/libkrb.so.3
OLD_LIBS+=usr/lib/libroken.so.5
OLD_LIBS+=usr/lib/libssl.so.2
OLD_LIBS+=usr/lib/pam_kerberosIV.so
# 200208XX
OLD_LIBS+=usr/lib/libgssapi.so.4
# 200203XX
OLD_LIBS+=usr/lib/libss.so.3
OLD_LIBS+=usr/lib/libusb.so.0
# 200112XX
OLD_LIBS+=usr/lib/libfetch.so.2
# 200110XX
OLD_LIBS+=usr/lib/libgssapi.so.3
# 200104XX
OLD_LIBS+=usr/lib/libdescrypt.so.2
OLD_LIBS+=usr/lib/libscrypt.so.2
# 200102XX
OLD_LIBS+=usr/lib/libcrypto.so.1
OLD_LIBS+=usr/lib/libssl.so.1
# 200009XX
OLD_LIBS+=usr/lib/libRSAglue.so.1
OLD_LIBS+=usr/lib/librsaINTL.so.1
OLD_LIBS+=usr/lib/librsaUSA.so.1
# 200006XX
OLD_LIBS+=usr/lib/libalias.so.3
OLD_LIBS+=usr/lib/libfetch.so.1
OLD_LIBS+=usr/lib/libipsec.so.0
# 200005XX
OLD_LIBS+=usr/lib/libxpg4.so.2
# 200002XX
OLD_LIBS+=usr/lib/libc.so.3
OLD_LIBS+=usr/lib/libcurses.so.2
OLD_LIBS+=usr/lib/libdialog.so.3
OLD_LIBS+=usr/lib/libedit.so.2
OLD_LIBS+=usr/lib/libf2c.so.2
OLD_LIBS+=usr/lib/libftpio.so.4
OLD_LIBS+=usr/lib/libg++.so.4
OLD_LIBS+=usr/lib/libhistory.so.3
OLD_LIBS+=usr/lib/libmytinfo.so.2
OLD_LIBS+=usr/lib/libncurses.so.3
OLD_LIBS+=usr/lib/libreadline.so.3
OLD_LIBS+=usr/lib/libss.so.2
OLD_LIBS+=usr/lib/libtermcap.so.2
OLD_LIBS+=usr/lib/libutil.so.2
OLD_LIBS+=usr/lib/libvgl.so.1
OLD_LIBS+=usr/lib/libwrap.so.2
# ???
OLD_LIBS+=usr/lib/libarchive.so.2
OLD_LIBS+=usr/lib/libbsnmp.so.1
OLD_LIBS+=usr/lib/libc_r.so.6
OLD_LIBS+=usr/lib/libcipher.so.2
OLD_LIBS+=usr/lib/libgssapi.so.6
OLD_LIBS+=usr/lib/libkse.so.1
OLD_LIBS+=usr/lib/liblwres.so.3
OLD_LIBS+=usr/lib/pam_ftp.so.2
# 20040925: bind9 import
OLD_DIRS+=usr/share/doc/bind/html
OLD_DIRS+=usr/share/doc/bind/misc
OLD_DIRS+=usr/share/doc/bind/
# ???
OLD_DIRS+=usr/include/g++/std
OLD_DIRS+=usr/include/msdosfs
OLD_DIRS+=usr/include/ntfs
OLD_DIRS+=usr/include/nwfs
OLD_DIRS+=usr/include/ufs/mfs
# 20011001: UUCP migration to ports
OLD_DIRS+=usr/libexec/uucp
.include "tools/build/mk/OptionalObsoleteFiles.inc"
diff --git a/cddl/lib/libzfs/Makefile b/cddl/lib/libzfs/Makefile
index f96ba4f24e5d..6f8965e4c14a 100644
--- a/cddl/lib/libzfs/Makefile
+++ b/cddl/lib/libzfs/Makefile
@@ -1,106 +1,107 @@
.PATH: ${SRCTOP}/sys/contrib/openzfs/module/icp
.PATH: ${SRCTOP}/sys/contrib/openzfs/module/zcommon
.PATH: ${SRCTOP}/sys/contrib/openzfs/lib/libzfs
.PATH: ${SRCTOP}/sys/contrib/openzfs/lib/libzfs/os/freebsd
.PATH: ${SRCTOP}/sys/contrib/openzfs/lib/libshare
.PATH: ${SRCTOP}/sys/contrib/openzfs/include
.PATH: ${SRCTOP}/sys/contrib/openzfs/module/zstd
.PATH: ${SRCTOP}/sys/contrib/openzfs/module/zstd/lib
PACKAGE= zfs
LIB= zfs
LIBADD= \
avl \
bsdxml \
crypto \
geom \
m \
md \
nvpair \
pthread \
+ rt \
umem \
util \
uutil \
z \
zfs_core \
zutil
INCS= libzfs.h
USER_C = \
libzfs_changelist.c \
libzfs_config.c \
libzfs_crypto.c \
libzfs_dataset.c \
libzfs_diff.c \
libzfs_import.c \
libzfs_iter.c \
libzfs_mount.c \
libzfs_pool.c \
libzfs_sendrecv.c \
libzfs_status.c \
libzfs_util.c
# FreeBSD
USER_C += \
libzfs_compat.c \
libzfs_zmount.c
# libshare
USER_C += \
libshare.c \
nfs.c \
os/freebsd/nfs.c \
os/freebsd/smb.c
KERNEL_C = \
cityhash.c \
zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
ARCH_C =
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
ARCH_C += zfs_fletcher_intel.c \
zfs_fletcher_sse.c
CFLAGS += -DHAVE_SSE2
.endif
.if ${MACHINE_ARCH} == "amd64"
ARCH_C += zfs_fletcher_avx512.c
CFLAGS+= -DHAVE_AVX2 -DHAVE_AVX -D__x86_64 -DHAVE_AVX512F
.endif
.if ${MACHINE_CPUARCH} == "aarch64"
ARCH_C += zfs_fletcher_aarch64_neon.c
.endif
SRCS= $(USER_C) $(KERNEL_C) $(ARCH_C)
WARNS?= 2
SHLIB_MAJOR= 4
CSTD= c99
CFLAGS+= -DIN_BASE
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/include
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/include/os/freebsd
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libspl/include
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libspl/include/os/freebsd
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libshare
CFLAGS+= -I${SRCTOP}/sys/contrib/ck/include
CFLAGS+= -I${SRCTOP}/sys
CFLAGS+= -I${SRCTOP}/cddl/compat/opensolaris/include
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/module/icp/include
CFLAGS+= -include ${SRCTOP}/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -DHAVE_ISSETUGID
CFLAGS+= -include ${SRCTOP}/sys/modules/zfs/zfs_config.h
CFLAGS+= -DSYSCONFDIR=\"/etc\"
CFLAGS+= -DPKGDATADIR=\"/usr/share/zfs\"
.include <bsd.lib.mk>
diff --git a/cddl/lib/libzfs/Makefile.depend b/cddl/lib/libzfs/Makefile.depend
index 60fad1090483..1fead4b4f29d 100644
--- a/cddl/lib/libzfs/Makefile.depend
+++ b/cddl/lib/libzfs/Makefile.depend
@@ -1,29 +1,30 @@
# Autogenerated - do NOT edit!
DIRDEPS = \
cddl/lib/libavl \
cddl/lib/libnvpair \
cddl/lib/libumem \
cddl/lib/libuutil \
cddl/lib/libzfs_core \
cddl/lib/libzutil \
include \
include/xlocale \
lib/${CSU_DIR} \
lib/libc \
lib/libcompiler_rt \
lib/libexpat \
lib/libgeom \
lib/libmd \
+ lib/librt \
lib/libthr \
lib/libutil \
lib/libz \
lib/msun \
secure/lib/libcrypto \
.include <dirdeps.mk>
.if ${DEP_RELDIR} == ${_DEP_RELDIR}
# local dependencies - needed for -jN in clean tree
.endif
diff --git a/lib/librt/Makefile b/lib/librt/Makefile
index 9a54c3ea2812..c755bb123c73 100644
--- a/lib/librt/Makefile
+++ b/lib/librt/Makefile
@@ -1,26 +1,27 @@
+SHLIBDIR?=/lib
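# Installing the shared library in /lib rather than the default /usr/lib
# presumably lets early-boot consumers resolve librt before /usr is
# mounted; libzfs, which lives in /lib, gains an rt dependency in this
# same change.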
.include <src.opts.mk>
PACKAGE=clibs
LIB=rt
SHLIB_MAJOR= 1
CFLAGS+=-I${SRCTOP}/lib/libc/include -I${.CURDIR}
.ifndef NO_THREAD_STACK_UNWIND
CFLAGS+=-fexceptions
.endif
CFLAGS+=-Wall
LIBADD= pthread
WARNS?= 2
SRCS+= aio.c mq.c sigev_thread.c timer.c
PRECIOUSLIB=
VERSION_DEF=${SRCTOP}/lib/libc/Versions.def
SYMBOL_MAPS=${.CURDIR}/Symbol.map
HAS_TESTS=
SUBDIR.${MK_TESTS}+= tests
.include <bsd.lib.mk>
diff --git a/rescue/rescue/Makefile b/rescue/rescue/Makefile
index 9eb41eecb1ca..0283aed9391c 100644
--- a/rescue/rescue/Makefile
+++ b/rescue/rescue/Makefile
@@ -1,270 +1,270 @@
# @(#)Makefile 8.1 (Berkeley) 6/2/93
.include <src.opts.mk>
.include <bsd.linker.mk>
PACKAGE=rescue
MAN=
MK_SSP= no
# Static-PIE is not supported, so we should not be linking against _pie.a libs.
# This is also needed to avoid linking against sanitizer-instrumented libraries
# since MK_ASAN/MK_UBSAN will instrument the .pieo object files.
MK_PIE= no
NO_SHARED= yes
CRUNCH_BUILDOPTS+= MK_PIE=no NO_SHARED=yes
# lld >= 16 became stricter about multiply defined symbols. Since there are
# many of those in crunchgen'd programs, turn off the check.
.if ${LINKER_TYPE} == "lld" && ${LINKER_VERSION} >= 160000
LDFLAGS+= -Wl,--allow-multiple-definition
.endif
PROG= rescue
BINDIR?=/rescue
# Shell scripts need #! line to be edited from /bin/sh to /rescue/sh
SCRIPTS= nextboot_FIXED
SCRIPTSNAME_nextboot_FIXED= nextboot
nextboot_FIXED: ../../sbin/reboot/nextboot.sh
sed '1s/\/bin\//\/rescue\//' ${.ALLSRC} > ${.TARGET}
CLEANFILES+= nextboot_FIXED
SCRIPTS+= dhclient_FIXED
SCRIPTSNAME_dhclient_FIXED= dhclient-script
dhclient_FIXED: ../../sbin/dhclient/dhclient-script
sed '1s/\/bin\//\/rescue\//' ${.ALLSRC} > ${.TARGET}
CLEANFILES+= dhclient_FIXED
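# Illustrative effect of the sed(1) edits above: the interpreter line of
# each script changes from "#!/bin/sh" to "#!/rescue/sh", so the crunched
# shell is used even when /bin is unavailable.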
# The help which used to be here is now in mk/bsd.crunchgen.mk
# Define Makefile variable RESCUE
CRUNCH_BUILDOPTS+= -DRESCUE
# Define compile-time RESCUE symbol when compiling components
CRUNCH_BUILDOPTS+= CRUNCH_CFLAGS=-DRESCUE
# An experiment that failed: try overriding bsd.lib.mk and bsd.prog.mk
# rather than incorporating rescue-specific logic into standard files.
#MAKEFLAGS= -m ${.CURDIR} ${.MAKEFLAGS}
# Hackery: 'librescue' exists merely as a tool for appropriately
# recompiling specific library entries. We _know_ they're needed, and
# regular archive searching creates ugly library ordering problems.
# Easiest fix: tell the linker to include them into the executable
# first, so they are guaranteed to override the regular lib entries.
# Note that if 'librescue' hasn't been compiled, we'll just get the
# regular lib entries from libc and friends.
CRUNCH_LIBS+= ${.OBJDIR}/../librescue/*.o
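# Sketch of the effect (object name illustrative): object files named on
# the command line are always included, so a recompiled open.o from
# librescue satisfies the symbol before ld(1) ever searches libc.a, and
# the corresponding archive member is never pulled in.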
###################################################################
# Programs from stock /bin
#
# WARNING: Changing this list may require adjusting
# /usr/include/paths.h as well! You were warned!
#
CRUNCH_SRCDIRS+= bin
CRUNCH_PROGS_bin= cat chflags chio chmod cp date dd df echo \
ed expr getfacl hostname kenv kill ln ls mkdir mv \
pkill ps pwd realpath rm rmdir setfacl sh sleep stty \
sync test
CRUNCH_LIBS+= -lcrypt -ledit -ljail -lkvm -lelf -ltermcapw -lutil -lxo
CRUNCH_BUILDTOOLS+= bin/sh
# Additional options for specific programs
CRUNCH_ALIAS_test= [
CRUNCH_ALIAS_sh= -sh
# The -sh alias shouldn't appear in /rescue as a hard link
CRUNCH_SUPPRESS_LINK_-sh= 1
CRUNCH_ALIAS_ln= link
CRUNCH_ALIAS_rm= unlink
CRUNCH_ALIAS_ed= red
CRUNCH_ALIAS_pkill= pgrep
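# Illustrative result of the aliases above: crunchgen(1) makes each alias
# a hard link to the single crunched binary (unless suppressed, as for
# -sh below), so /rescue/[ and /rescue/test run the same executable,
# dispatching on argv[0].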
.if ${MK_TCSH} != "no"
CRUNCH_PROGS_bin+= csh
CRUNCH_ALIAS_csh= -csh tcsh -tcsh
CRUNCH_BUILDTOOLS+= bin/csh
CRUNCH_SUPPRESS_LINK_-csh= 1
CRUNCH_SUPPRESS_LINK_-tcsh= 1
.endif
###################################################################
# Programs from standard /sbin
#
# WARNING: Changing this list may require adjusting
# /usr/include/paths.h as well! You were warned!
#
# Note that mdmfs has its own private 'pathnames.h' header in addition to
# the standard 'paths.h' header.
#
CRUNCH_SRCDIRS+= sbin
CRUNCH_PROGS_sbin= \
camcontrol clri devfs dmesg dump \
dumpfs dumpon fsck fsck_ffs fsck_msdosfs fsdb \
fsirand gbde geom ifconfig init \
kldconfig kldload kldstat kldunload ldconfig \
md5 mdconfig mdmfs mknod mount mount_cd9660 \
mount_msdosfs mount_nfs mount_nullfs \
mount_udf mount_unionfs newfs \
newfs_msdos nos-tun reboot \
restore rcorder route savecore \
shutdown swapon sysctl tunefs umount
.if ${MK_CCD} != "no"
CRUNCH_PROGS_sbin+= ccdconfig
.endif
.if ${MK_INET} != "no" || ${MK_INET6} != "no"
CRUNCH_PROGS_sbin+= ping
.endif
.if ${MK_INET6_SUPPORT} != "no"
CRUNCH_ALIAS_ping= ping6
CRUNCH_PROGS_sbin+= rtsol
.endif
.if ${MK_IPFILTER} != "no"
CRUNCH_PROGS_sbin+= ipf
CRUNCH_LIBS_ipf+= ${LIBIPF}
.endif
.if ${MK_ROUTED} != "no"
CRUNCH_PROGS_sbin+= routed rtquery
.endif
.if ${MK_ZFS} != "no"
CRUNCH_PROGS_sbin+= bectl
CRUNCH_PROGS_sbin+= zfs
CRUNCH_PROGS_sbin+= zpool
CRUNCH_PROGS_usr.sbin+= zdb
.endif
# crunchgen does not like C++ programs; this should be fixed someday
# CRUNCH_PROGS+= devd
CRUNCH_LIBS+= -l80211 -lalias -lcam -lncursesw -ldevstat -lipsec -llzma
.if ${MK_ZFS} != "no"
-CRUNCH_LIBS+= -lavl -lpthread -luutil -lumem -ltpool -lspl
+CRUNCH_LIBS+= -lavl -lpthread -luutil -lumem -ltpool -lspl -lrt
CRUNCH_LIBS_zfs+= ${LIBBE} \
${LIBZPOOL} \
${LIBZFS} \
${LIBZUTIL} \
${LIBZFS_CORE} \
${LIBZFSBOOTENV} \
${LIBICP_RESCUE} \
${LIBNVPAIR}
CRUNCH_LIBS_bectl+= ${CRUNCH_LIBS_zfs}
CRUNCH_LIBS_zpool+= ${CRUNCH_LIBS_zfs}
CRUNCH_LIBS_zdb+= ${CRUNCH_LIBS_zfs}
.else
# liblzma needs pthread
CRUNCH_LIBS+= -lpthread
.endif
CRUNCH_LIBS+= -lgeom -lbsdxml -lkiconv
.if ${MK_OPENSSL} == "no"
CRUNCH_LIBS+= -lmd
.endif
CRUNCH_LIBS+= -lmt -lsbuf -lufs -lz
.if ${MACHINE_CPUARCH} == "i386"
CRUNCH_PROGS_sbin+= bsdlabel fdisk
CRUNCH_ALIAS_bsdlabel= disklabel
#CRUNCH_PROGS+= mount_smbfs
#CRUNCH_LIBS+= -lsmb
.endif
.if ${MACHINE_CPUARCH} == "amd64"
CRUNCH_PROGS_sbin+= bsdlabel fdisk
CRUNCH_ALIAS_bsdlabel= disklabel
.endif
CRUNCH_SRCDIR_rtquery= ${SRCTOP}/sbin/routed/rtquery
CRUNCH_SRCDIR_ipf= ${SRCTOP}/sbin/ipf/ipf
.if ${MK_ZFS} != "no"
CRUNCH_SRCDIR_zfs= ${SRCTOP}/cddl/sbin/zfs
CRUNCH_SRCDIR_zpool= ${SRCTOP}/cddl/sbin/zpool
CRUNCH_SRCDIR_zdb= ${SRCTOP}/cddl/usr.sbin/zdb
.endif
CRUNCH_ALIAS_reboot= fastboot halt fasthalt
CRUNCH_ALIAS_restore= rrestore
CRUNCH_ALIAS_dump= rdump
CRUNCH_ALIAS_fsck_ffs= fsck_4.2bsd fsck_ufs
CRUNCH_ALIAS_geom= glabel gpart
CRUNCH_ALIAS_shutdown= poweroff
# dhclient has historically been troublesome...
CRUNCH_PROGS_sbin+= dhclient
##################################################################
# Programs from stock /usr/bin
#
CRUNCH_SRCDIRS+= usr.bin
CRUNCH_PROGS_usr.bin= head mt sed tail tee
CRUNCH_PROGS_usr.bin+= gzip
CRUNCH_ALIAS_gzip= gunzip gzcat zcat
CRUNCH_PROGS_usr.bin+= bzip2
CRUNCH_ALIAS_bzip2= bunzip2 bzcat
CRUNCH_LIBS+= -lbz2
CRUNCH_PROGS_usr.bin+= less
CRUNCH_ALIAS_less= more
CRUNCH_PROGS_usr.bin+= xz
CRUNCH_ALIAS_xz= unxz lzma unlzma xzcat lzcat
CRUNCH_PROGS_usr.bin+= zstd
CRUNCH_ALIAS_zstd= unzstd zstdcat zstdmt
CRUNCH_LIBS+= ${LDADD_zstd}
CRUNCH_PROGS_usr.bin+= fetch
CRUNCH_LIBS+= -lfetch
CRUNCH_PROGS_usr.bin+= tar
CRUNCH_LIBS+= -larchive
.if ${MK_OPENSSL} != "no"
CRUNCH_LIBS+= -lssl -lcrypto
.endif
CRUNCH_LIBS+= -lmd
.if ${MK_NETCAT} != "no"
CRUNCH_PROGS_usr.bin+= nc
.endif
.if ${MK_VI} != "no"
CRUNCH_PROGS_usr.bin+= vi
CRUNCH_ALIAS_vi= ex
.endif
CRUNCH_PROGS_usr.bin+= id
CRUNCH_ALIAS_id= groups whoami
##################################################################
# Programs from stock /usr/sbin
#
CRUNCH_SRCDIRS+= usr.sbin
CRUNCH_PROGS_usr.sbin+= chroot
CRUNCH_PROGS_usr.sbin+= chown
CRUNCH_ALIAS_chown= chgrp
##################################################################
CRUNCH_LIBS+= ${OBJTOP}/lib/libifconfig/libifconfig.a
CRUNCH_BUILDOPTS+= CRUNCH_CFLAGS+=-I${OBJTOP}/lib/libifconfig
CRUNCH_LIBS_ifconfig+= ${LIBNV}
CRUNCH_LIBS+= -lm
.if ${MK_ISCSI} != "no"
CRUNCH_PROGS_usr.bin+= iscsictl
CRUNCH_PROGS_usr.sbin+= iscsid
CRUNCH_LIBS+= ${OBJTOP}/lib/libiscsiutil/libiscsiutil.a
CRUNCH_BUILDOPTS+= CRUNCH_CFLAGS+=-I${OBJTOP}/lib/libiscsiutil
.endif
.include <bsd.crunchgen.mk>
.include <bsd.prog.mk>
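# Net effect (informal summary): crunchgen(1) builds one statically linked
# binary from the lists above and installs every program and alias under
# ${BINDIR}, i.e. /rescue, as a hard link to it.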
diff --git a/rescue/rescue/Makefile.depend b/rescue/rescue/Makefile.depend
index d9f20a6afd5a..0bfb25d3f174 100644
--- a/rescue/rescue/Makefile.depend
+++ b/rescue/rescue/Makefile.depend
@@ -1,64 +1,65 @@
# Autogenerated - do NOT edit!
DIRDEPS = \
cddl/lib/libavl \
cddl/lib/libicp_rescue \
cddl/lib/libnvpair \
cddl/lib/libspl \
cddl/lib/libtpool \
cddl/lib/libumem \
cddl/lib/libuutil \
cddl/lib/libzfs \
cddl/lib/libzfs_core \
cddl/lib/libzfsbootenv \
cddl/lib/libzpool \
cddl/lib/libzutil \
lib/${CSU_DIR} \
lib/lib80211 \
lib/libalias/libalias \
lib/libarchive \
lib/libbe \
lib/libbz2 \
lib/libc \
lib/libcam \
lib/libcompiler_rt \
lib/libcrypt \
lib/libdevstat \
lib/libedit \
lib/libelf \
lib/libexpat \
lib/libfetch \
lib/libgeom \
lib/libifconfig \
lib/libipsec \
lib/libiscsiutil \
lib/libjail \
lib/libkiconv \
lib/libkvm \
lib/liblzma \
lib/libmd \
lib/libmt \
lib/libnv \
+ lib/librt \
lib/libsbuf \
lib/libthr \
lib/libufs \
lib/libutil \
lib/libxo/libxo \
lib/libz \
lib/libzstd \
lib/msun \
lib/ncurses/ncurses \
lib/ncurses/tinfo \
rescue/librescue \
sbin/ipf/libipf \
secure/lib/libcrypto \
secure/lib/libssl \
usr.sbin/crunch/crunchgen.host \
usr.sbin/crunch/crunchide.host \
.include <dirdeps.mk>
.if ${DEP_RELDIR} == ${_DEP_RELDIR}
# local dependencies - needed for -jN in clean tree
.endif
diff --git a/share/mk/src.libnames.mk b/share/mk/src.libnames.mk
index d458b28adf3d..6a79d0b864d3 100644
--- a/share/mk/src.libnames.mk
+++ b/share/mk/src.libnames.mk
@@ -1,793 +1,793 @@
#
# The include file <src.libnames.mk> defines library names suitable
# for INTERNALLIB and PRIVATELIB definitions
.if !target(__<bsd.init.mk>__)
.error src.libnames.mk cannot be included directly.
.endif
.if !target(__<src.libnames.mk>__)
__<src.libnames.mk>__:
.include <src.opts.mk>
_PRIVATELIBS= \
atf_c \
atf_cxx \
auditd \
bsddialog \
bsdstat \
cbor \
devdctl \
event1 \
fido2 \
gmock \
gtest \
gmock_main \
gtest_main \
heimipcc \
heimipcs \
ldns \
sqlite3 \
ssh \
ucl \
unbound \
zstd
# Let projects based on FreeBSD append to _PRIVATELIBS
# by maintaining their own LOCAL_PRIVATELIBS list.
_PRIVATELIBS+= ${LOCAL_PRIVATELIBS}
_INTERNALLIBS= \
amu \
bsnmptools \
c_nossp_pic \
cron \
elftc \
fifolog \
ifconfig \
ipf \
iscsiutil \
lpr \
lua \
lutok \
netbsd \
ntp \
ntpevent \
openbsd \
opts \
parse \
pe \
pfctl \
pmcstat \
sl \
sm \
smdb \
smutil \
telnet \
vers \
wpaap \
wpacommon \
wpacrypto \
wpadrivers \
wpaeap_common \
wpaeap_peer \
wpaeap_server \
wpaeapol_auth \
wpaeapol_supp \
wpal2_packet \
wparadius \
wparsn_supp \
wpatls \
wpautils \
wpawps
# Let projects based on FreeBSD append to _INTERNALLIBS
# by maintaining their own LOCAL_INTERNALLIBS list.
_INTERNALLIBS+= ${LOCAL_INTERNALLIBS}
_LIBRARIES= \
${_PRIVATELIBS} \
${_INTERNALLIBS} \
${LOCAL_LIBRARIES} \
80211 \
9p \
alias \
archive \
asn1 \
avl \
BlocksRuntime \
be \
begemot \
bluetooth \
bsdxml \
bsm \
bsnmp \
bz2 \
c \
c_pic \
calendar \
cam \
casper \
cap_dns \
cap_fileargs \
cap_grp \
cap_net \
cap_netdb \
cap_pwd \
cap_sysctl \
cap_syslog \
com_err \
compiler_rt \
crypt \
crypto \
ctf \
cuse \
cxxrt \
devctl \
devdctl \
devinfo \
devstat \
dialog \
dl \
dpv \
dtrace \
dwarf \
edit \
efivar \
elf \
execinfo \
fetch \
figpar \
formw \
geom \
gpio \
gssapi \
gssapi_krb5 \
hdb \
heimbase \
heimntlm \
heimsqlite \
hx509 \
icp \
ipsec \
ipt \
jail \
kadm5clnt \
kadm5srv \
kafs5 \
kdc \
kiconv \
krb5 \
kvm \
l \
lzma \
m \
magic \
md \
memstat \
mp \
mt \
ncursesw \
netgraph \
netmap \
ngatm \
nv \
nvpair \
opencsd \
pam \
panel \
panelw \
pcap \
pcsclite \
pjdlog \
pmc \
proc \
procstat \
pthread \
radius \
regex \
roken \
rpcsec_gss \
rpcsvc \
rt \
rtld_db \
sbuf \
sdp \
sm \
smb \
spl \
ssl \
ssp_nonshared \
stats \
stdthreads \
supcplusplus \
sysdecode \
tacplus \
termcapw \
tinfow \
tpool \
ufs \
ugidfw \
ulog \
umem \
usb \
usbhid \
util \
uutil \
vmmapi \
wind \
wrap \
xo \
y \
ypclnt \
z \
zfs_core \
zfs \
zfsbootenv \
zpool \
zutil
.if ${MK_BLACKLIST} != "no"
_LIBRARIES+= \
blacklist
.endif
.if ${MK_OFED} != "no"
_LIBRARIES+= \
cxgb4 \
ibcm \
ibmad \
ibnetdisc \
ibumad \
ibverbs \
irdma \
mlx4 \
mlx5 \
rdmacm \
osmcomp \
opensm \
osmvendor
.endif
.if ${MK_BEARSSL} == "yes"
_LIBRARIES+= \
bearssl \
secureboot
LIBBEARSSL?= ${LIBBEARSSLDIR}/libbearssl.a
LIBSECUREBOOT?= ${LIBSECUREBOOTDIR}/libsecureboot.a
.endif
.if ${MK_VERIEXEC} == "yes"
_LIBRARIES+= veriexec
LIBVERIEXEC?= ${LIBVERIEXECDIR}/libveriexec.a
.endif
# Each library's LIBADD needs to be duplicated here for static linkage of
# 2nd+ order consumers. Auto-generating this would be better.
_DP_80211= sbuf bsdxml
_DP_9p= sbuf
.if ${MK_CASPER} != "no"
_DP_9p+= casper cap_pwd cap_grp
.endif
# XXX: Not bootstrapped, so the host version is used on non-FreeBSD; don't
# use a FreeBSD-specific dependency list
.if ${.MAKE.OS} == "FreeBSD" || !defined(BOOTSTRAPPING)
_DP_archive= z bz2 lzma bsdxml zstd
.endif
_DP_avl= spl
_DP_bsddialog= formw ncursesw tinfow
_DP_zstd= pthread
.if ${MK_BLACKLIST} != "no"
_DP_blacklist+= pthread
.endif
_DP_crypto= pthread
# See comment by _DP_archive above
.if ${.MAKE.OS} == "FreeBSD" || !defined(BOOTSTRAPPING)
.if ${MK_OPENSSL} != "no"
_DP_archive+= crypto
.else
_DP_archive+= md
.endif
.endif
_DP_sqlite3= pthread
_DP_ssl= crypto
_DP_ssh= crypto crypt z
.if ${MK_LDNS} != "no"
_DP_ssh+= ldns
.endif
_DP_edit= tinfow
.if ${MK_OPENSSL} != "no"
_DP_bsnmp= crypto
.endif
_DP_geom= bsdxml sbuf
_DP_cam= sbuf
_DP_kvm= elf
_DP_casper= nv
_DP_cap_dns= nv
_DP_cap_fileargs= nv
_DP_cap_grp= nv
_DP_cap_pwd= nv
_DP_cap_sysctl= nv
_DP_cap_syslog= nv
.if ${MK_OFED} != "no"
_DP_pcap= ibverbs mlx5
.endif
_DP_pjdlog= util
_DP_usb= pthread
_DP_unbound= ssl crypto pthread
_DP_rt= pthread
.if ${MK_OPENSSL} == "no"
_DP_radius= md
.else
_DP_radius= crypto
.endif
_DP_rtld_db= elf procstat
_DP_procstat= kvm util elf
_DP_proc= cxxrt
.if ${MK_CDDL} != "no"
_DP_proc+= ctf
.endif
_DP_proc+= elf procstat rtld_db util z
_DP_mp= crypto
_DP_memstat= kvm
_DP_magic= z
_DP_mt= sbuf bsdxml
_DP_ldns= ssl crypto
_DP_lua= m
_DP_lutok= lua
.if ${MK_OPENSSL} != "no"
_DP_fetch= ssl crypto
.else
_DP_fetch= md
.endif
_DP_execinfo= elf
_DP_dwarf= elf z
_DP_dpv= dialog figpar util tinfow ncursesw
_DP_dialog= tinfow ncursesw m
_DP_cuse= pthread
_DP_atf_cxx= atf_c
_DP_gtest= pthread regex
_DP_gmock= gtest
_DP_gmock_main= gmock
_DP_gtest_main= gtest
_DP_devstat= kvm
_DP_pam= radius tacplus md util
.if ${MK_KERBEROS} != "no"
_DP_pam+= krb5
.endif
.if ${MK_OPENSSH} != "no"
_DP_fido2+= crypto z
_DP_pam+= ssh
.endif
.if ${MK_NIS} != "no"
_DP_pam+= ypclnt
.endif
_DP_roken= crypt
_DP_kadm5clnt= com_err krb5 roken
_DP_kadm5srv= com_err hdb krb5 roken
_DP_heimntlm= crypto com_err krb5 roken
_DP_hx509= asn1 com_err crypto roken wind
_DP_hdb= asn1 com_err krb5 roken sqlite3
_DP_asn1= com_err roken
_DP_kdc= roken hdb hx509 krb5 heimntlm asn1 crypto
_DP_wind= com_err roken
_DP_heimbase= pthread
_DP_heimipcc= heimbase roken pthread
_DP_heimipcs= heimbase roken pthread
_DP_kafs5= asn1 krb5 roken
_DP_krb5= asn1 com_err crypt crypto hx509 roken wind heimbase heimipcc
_DP_gssapi_krb5= gssapi krb5 crypto roken asn1 com_err
_DP_lzma= md pthread
_DP_ucl= m
_DP_vmmapi= util
_DP_opencsd= cxxrt
_DP_ctf= spl z
_DP_dtrace= ctf elf proc pthread rtld_db
_DP_xo= util
_DP_ztest= geom m nvpair umem zpool pthread avl zfs_core spl zutil zfs uutil icp
# The libc dependencies are not strictly needed but are defined to make the
# assert happy.
_DP_c= compiler_rt
# Use libssp_nonshared only on i386 and power*. Other archs emit direct calls
# to __stack_chk_fail, not __stack_chk_fail_local provided by libssp_nonshared.
.if ${MK_SSP} != "no" && \
(${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH:Mpower*} != "")
_DP_c+= ssp_nonshared
.endif
_DP_stats= sbuf pthread
_DP_stdthreads= pthread
_DP_tacplus= md pam
_DP_ncursesw= tinfow
_DP_formw= ncursesw
_DP_nvpair= spl
_DP_panelw= ncursesw
_DP_rpcsec_gss= gssapi
_DP_smb= kiconv
_DP_ulog= md
_DP_fifolog= z
_DP_ipf= kvm
_DP_tpool= spl
_DP_uutil= avl spl
-_DP_zfs= md pthread umem util uutil m avl bsdxml crypto geom nvpair \
+_DP_zfs= md pthread rt umem util uutil m avl bsdxml crypto geom nvpair \
z zfs_core zutil
_DP_zfsbootenv= zfs nvpair
_DP_zfs_core= nvpair spl zutil
_DP_zpool= md pthread z icp spl nvpair avl umem
_DP_zutil= avl geom m tpool
_DP_be= zfs spl nvpair zfsbootenv
_DP_netmap=
_DP_ifconfig= m
_DP_pfctl= nv
# OFED support
.if ${MK_OFED} != "no"
_DP_cxgb4= ibverbs pthread
_DP_ibcm= ibverbs
_DP_ibmad= ibumad
_DP_ibnetdisc= osmcomp ibmad ibumad
_DP_ibumad=
_DP_ibverbs=
_DP_irdma= ibverbs pthread
_DP_mlx4= ibverbs pthread
_DP_mlx5= ibverbs pthread
_DP_rdmacm= ibverbs
_DP_osmcomp= pthread
_DP_opensm= pthread
_DP_osmvendor= ibumad pthread
.endif
# Define special cases
LDADD_supcplusplus= -lsupc++
LIBATF_C= ${LIBDESTDIR}${LIBDIR_BASE}/libprivateatf-c.a
LIBATF_CXX= ${LIBDESTDIR}${LIBDIR_BASE}/libprivateatf-c++.a
LDADD_atf_c= -lprivateatf-c
LDADD_atf_cxx= -lprivateatf-c++
LIBGMOCK= ${LIBDESTDIR}${LIBDIR_BASE}/libprivategmock.a
LIBGMOCK_MAIN= ${LIBDESTDIR}${LIBDIR_BASE}/libprivategmock_main.a
LIBGTEST= ${LIBDESTDIR}${LIBDIR_BASE}/libprivategtest.a
LIBGTEST_MAIN= ${LIBDESTDIR}${LIBDIR_BASE}/libprivategtest_main.a
LDADD_gmock= -lprivategmock
LDADD_gtest= -lprivategtest
LDADD_gmock_main= -lprivategmock_main
LDADD_gtest_main= -lprivategtest_main
.for _l in ${_PRIVATELIBS}
LIB${_l:tu}?= ${LIBDESTDIR}${LIBDIR_BASE}/libprivate${_l}.a
.endfor
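# For example, for _l=zstd the loop above yields
# LIBZSTD?= ${LIBDESTDIR}${LIBDIR_BASE}/libprivatezstd.a, which pairs with
# the -lprivatezstd default assigned to LDADD_zstd further down.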
.if ${MK_PIE} != "no"
PIE_SUFFIX= _pie
.endif
.for _l in ${_LIBRARIES}
.if ${_INTERNALLIBS:M${_l}} || !defined(SYSROOT)
LDADD_${_l}_L+= -L${LIB${_l:tu}DIR}
.endif
DPADD_${_l}?= ${LIB${_l:tu}}
.if ${_PRIVATELIBS:M${_l}}
LDADD_${_l}?= -lprivate${_l}
.elif ${_INTERNALLIBS:M${_l}}
LDADD_${_l}?= ${LDADD_${_l}_L} -l${_l:S/${PIE_SUFFIX}//}${PIE_SUFFIX}
.else
LDADD_${_l}?= ${LDADD_${_l}_L} -l${_l}
.endif
# Add in all dependencies for static linkage.
# Bootstrapping from non-FreeBSD needs special handling, since it overrides
# NO_SHARED back to yes despite only building static versions of bootstrap
# libraries (see tools/build/mk/Makefile.boot.pre).
.if defined(_DP_${_l}) && (${_INTERNALLIBS:M${_l}} || \
(defined(NO_SHARED) && ${NO_SHARED:tl} != "no") || \
(defined(BOOTSTRAPPING) && ${.MAKE.OS} != "FreeBSD"))
.for _d in ${_DP_${_l}}
DPADD_${_l}+= ${DPADD_${_d}}
LDADD_${_l}+= ${LDADD_${_d}}
.endfor
.endif
.endfor
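# Worked example of the loop above for an ordinary library (geom, neither
# private nor internal): it gets DPADD_geom?= ${LIBGEOM} and
# LDADD_geom?= ${LDADD_geom_L} -lgeom, and when linking statically its
# _DP_geom entries (bsdxml sbuf) are folded in as well.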
# These are special cases where the library is broken and anything that uses
# it needs to add more dependencies. Broken usually means that it has a
# cyclic dependency and cannot link its own dependencies. This is bad, please
# fix the library instead.
# Unless the library itself is broken, the proper place to define
# dependencies is _DP_* above.
# libatf-c++ exposes the libatf-c ABI, hence atf_cxx needs to be explicitly
# linked against atf_c
DPADD_atf_cxx+= ${DPADD_atf_c}
LDADD_atf_cxx+= ${LDADD_atf_c}
DPADD_gmock+= ${DPADD_gtest}
LDADD_gmock+= ${LDADD_gtest}
DPADD_gmock_main+= ${DPADD_gmock}
LDADD_gmock_main+= ${LDADD_gmock}
DPADD_gtest_main+= ${DPADD_gtest}
LDADD_gtest_main+= ${LDADD_gtest}
# Detect LDADD/DPADD that should be LIBADD, before modifying LDADD here.
_BADLDADD=
.for _l in ${LDADD:M-l*:N-l*/*:C,^-l,,}
.if ${_LIBRARIES:M${_l}} && !${_PRIVATELIBS:M${_l}}
_BADLDADD+= ${_l}
.endif
.endfor
.if !empty(_BADLDADD)
.error ${.CURDIR}: These libraries should be LIBADD+=foo rather than DPADD/LDADD+=-lfoo: ${_BADLDADD}
.endif
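# Illustrative trigger (hypothetical consumer Makefile): setting
# "LDADD+= -lutil" would put util into _BADLDADD and stop the build at the
# .error above; the correct spelling is "LIBADD+= util".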
.for _l in ${LIBADD}
DPADD+= ${DPADD_${_l}}
LDADD+= ${LDADD_${_l}}
.endfor
_LIB_OBJTOP?= ${OBJTOP}
# INTERNALLIB definitions.
LIBELFTCDIR= ${_LIB_OBJTOP}/lib/libelftc
LIBELFTC?= ${LIBELFTCDIR}/libelftc${PIE_SUFFIX}.a
LIBLUADIR= ${_LIB_OBJTOP}/lib/liblua
LIBLUA?= ${LIBLUADIR}/liblua${PIE_SUFFIX}.a
LIBLUTOKDIR= ${_LIB_OBJTOP}/lib/liblutok
LIBLUTOK?= ${LIBLUTOKDIR}/liblutok${PIE_SUFFIX}.a
LIBPEDIR= ${_LIB_OBJTOP}/lib/libpe
LIBPE?= ${LIBPEDIR}/libpe${PIE_SUFFIX}.a
LIBOPENBSDDIR= ${_LIB_OBJTOP}/lib/libopenbsd
LIBOPENBSD?= ${LIBOPENBSDDIR}/libopenbsd${PIE_SUFFIX}.a
LIBSMDIR= ${_LIB_OBJTOP}/lib/libsm
LIBSM?= ${LIBSMDIR}/libsm${PIE_SUFFIX}.a
LIBSMDBDIR= ${_LIB_OBJTOP}/lib/libsmdb
LIBSMDB?= ${LIBSMDBDIR}/libsmdb${PIE_SUFFIX}.a
LIBSMUTILDIR= ${_LIB_OBJTOP}/lib/libsmutil
LIBSMUTIL?= ${LIBSMUTILDIR}/libsmutil${PIE_SUFFIX}.a
LIBNETBSDDIR?= ${_LIB_OBJTOP}/lib/libnetbsd
LIBNETBSD?= ${LIBNETBSDDIR}/libnetbsd${PIE_SUFFIX}.a
LIBVERSDIR?= ${_LIB_OBJTOP}/kerberos5/lib/libvers
LIBVERS?= ${LIBVERSDIR}/libvers${PIE_SUFFIX}.a
LIBSLDIR= ${_LIB_OBJTOP}/kerberos5/lib/libsl
LIBSL?= ${LIBSLDIR}/libsl${PIE_SUFFIX}.a
LIBIFCONFIGDIR= ${_LIB_OBJTOP}/lib/libifconfig
LIBIFCONFIG?= ${LIBIFCONFIGDIR}/libifconfig${PIE_SUFFIX}.a
LIBIPFDIR= ${_LIB_OBJTOP}/sbin/ipf/libipf
LIBIPF?= ${LIBIPFDIR}/libipf${PIE_SUFFIX}.a
LIBNVDIR= ${_LIB_OBJTOP}/lib/libnv
LIBNV?= ${LIBNVDIR}/libnv${PIE_SUFFIX}.a
LIBISCSIUTILDIR= ${_LIB_OBJTOP}/lib/libiscsiutil
LIBISCSIUTIL?= ${LIBISCSIUTILDIR}/libiscsiutil${PIE_SUFFIX}.a
LIBTELNETDIR= ${_LIB_OBJTOP}/lib/libtelnet
LIBTELNET?= ${LIBTELNETDIR}/libtelnet${PIE_SUFFIX}.a
LIBCRONDIR= ${_LIB_OBJTOP}/usr.sbin/cron/lib
LIBCRON?= ${LIBCRONDIR}/libcron${PIE_SUFFIX}.a
LIBNTPDIR= ${_LIB_OBJTOP}/usr.sbin/ntp/libntp
LIBNTP?= ${LIBNTPDIR}/libntp${PIE_SUFFIX}.a
LIBNTPEVENTDIR= ${_LIB_OBJTOP}/usr.sbin/ntp/libntpevent
LIBNTPEVENT?= ${LIBNTPEVENTDIR}/libntpevent${PIE_SUFFIX}.a
LIBOPTSDIR= ${_LIB_OBJTOP}/usr.sbin/ntp/libopts
LIBOPTS?= ${LIBOPTSDIR}/libopts${PIE_SUFFIX}.a
LIBPARSEDIR= ${_LIB_OBJTOP}/usr.sbin/ntp/libparse
LIBPARSE?= ${LIBPARSEDIR}/libparse${PIE_SUFFIX}.a
LIBPFCTLDIR= ${_LIB_OBJTOP}/lib/libpfctl
LIBPFCTL?= ${LIBPFCTLDIR}/libpfctl${PIE_SUFFIX}.a
LIBLPRDIR= ${_LIB_OBJTOP}/usr.sbin/lpr/common_source
LIBLPR?= ${LIBLPRDIR}/liblpr${PIE_SUFFIX}.a
LIBFIFOLOGDIR= ${_LIB_OBJTOP}/usr.sbin/fifolog/lib
LIBFIFOLOG?= ${LIBFIFOLOGDIR}/libfifolog${PIE_SUFFIX}.a
LIBBSNMPTOOLSDIR= ${_LIB_OBJTOP}/usr.sbin/bsnmpd/tools/libbsnmptools
LIBBSNMPTOOLS?= ${LIBBSNMPTOOLSDIR}/libbsnmptools${PIE_SUFFIX}.a
LIBBE?= ${LIBBEDIR}/libbe${PIE_SUFFIX}.a
LIBPMCSTATDIR= ${_LIB_OBJTOP}/lib/libpmcstat
LIBPMCSTAT?= ${LIBPMCSTATDIR}/libpmcstat${PIE_SUFFIX}.a
LIBWPAAPDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/ap
LIBWPAAP?= ${LIBWPAAPDIR}/libwpaap${PIE_SUFFIX}.a
LIBWPACOMMONDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/common
LIBWPACOMMON?= ${LIBWPACOMMONDIR}/libwpacommon${PIE_SUFFIX}.a
LIBWPACRYPTODIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/crypto
LIBWPACRYPTO?= ${LIBWPACRYPTODIR}/libwpacrypto${PIE_SUFFIX}.a
LIBWPADRIVERSDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/drivers
LIBWPADRIVERS?= ${LIBWPADRIVERSDIR}/libwpadrivers${PIE_SUFFIX}.a
LIBWPAEAP_COMMONDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/eap_common
LIBWPAEAP_COMMON?= ${LIBWPAEAP_COMMONDIR}/libwpaeap_common${PIE_SUFFIX}.a
LIBWPAEAP_PEERDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/eap_peer
LIBWPAEAP_PEER?= ${LIBWPAEAP_PEERDIR}/libwpaeap_peer${PIE_SUFFIX}.a
LIBWPAEAP_SERVERDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/eap_server
LIBWPAEAP_SERVER?= ${LIBWPAEAP_SERVERDIR}/libwpaeap_server${PIE_SUFFIX}.a
LIBWPAEAPOL_AUTHDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/eapol_auth
LIBWPAEAPOL_AUTH?= ${LIBWPAEAPOL_AUTHDIR}/libwpaeapol_auth${PIE_SUFFIX}.a
LIBWPAEAPOL_SUPPDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/eapol_supp
LIBWPAEAPOL_SUPP?= ${LIBWPAEAPOL_SUPPDIR}/libwpaeapol_supp${PIE_SUFFIX}.a
LIBWPAL2_PACKETDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/l2_packet
LIBWPAL2_PACKET?= ${LIBWPAL2_PACKETDIR}/libwpal2_packet${PIE_SUFFIX}.a
LIBWPARADIUSDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/radius
LIBWPARADIUS?= ${LIBWPARADIUSDIR}/libwparadius${PIE_SUFFIX}.a
LIBWPARSN_SUPPDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/rsn_supp
LIBWPARSN_SUPP?= ${LIBWPARSN_SUPPDIR}/libwparsn_supp${PIE_SUFFIX}.a
LIBWPATLSDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/tls
LIBWPATLS?= ${LIBWPATLSDIR}/libwpatls${PIE_SUFFIX}.a
LIBWPAUTILSDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/utils
LIBWPAUTILS?= ${LIBWPAUTILSDIR}/libwpautils${PIE_SUFFIX}.a
LIBWPAWPSDIR= ${_LIB_OBJTOP}/usr.sbin/wpa/src/wps
LIBWPAWPS?= ${LIBWPAWPSDIR}/libwpawps${PIE_SUFFIX}.a
LIBC_NOSSP_PICDIR= ${_LIB_OBJTOP}/lib/libc
LIBC_NOSSP_PIC?= ${LIBC_NOSSP_PICDIR}/libc_nossp_pic.a
# Define a directory for each library. This is useful for adding -L flags
# when not using a --sysroot, or for meta-mode bootstrapping when there is
# no Makefile.depend. These are sorted by directory.
LIBAVLDIR= ${_LIB_OBJTOP}/cddl/lib/libavl
LIBCTFDIR= ${_LIB_OBJTOP}/cddl/lib/libctf
LIBDTRACEDIR= ${_LIB_OBJTOP}/cddl/lib/libdtrace
LIBICPDIR= ${_LIB_OBJTOP}/cddl/lib/libicp
LIBICP?= ${LIBICPDIR}/libicp${PIE_SUFFIX}.a
LIBICP_RESCUEDIR= ${_LIB_OBJTOP}/cddl/lib/libicp_rescue
LIBICP_RESCUE?= ${LIBICP_RESCUEDIR}/libicp_rescue${PIE_SUFFIX}.a
LIBNVPAIRDIR= ${_LIB_OBJTOP}/cddl/lib/libnvpair
LIBNVPAIR?= ${LIBNVPAIRDIR}/libnvpair${PIE_SUFFIX}.a
LIBUMEMDIR= ${_LIB_OBJTOP}/cddl/lib/libumem
LIBUUTILDIR= ${_LIB_OBJTOP}/cddl/lib/libuutil
LIBZFSDIR= ${_LIB_OBJTOP}/cddl/lib/libzfs
LIBZFS?= ${LIBZFSDIR}/libzfs${PIE_SUFFIX}.a
LIBZFS_COREDIR= ${_LIB_OBJTOP}/cddl/lib/libzfs_core
LIBZFS_CORE?= ${LIBZFS_COREDIR}/libzfs_core${PIE_SUFFIX}.a
LIBZFSBOOTENVDIR= ${_LIB_OBJTOP}/cddl/lib/libzfsbootenv
LIBZFSBOOTENV?= ${LIBZFSBOOTENVDIR}/libzfsbootenv${PIE_SUFFIX}.a
LIBZPOOLDIR= ${_LIB_OBJTOP}/cddl/lib/libzpool
LIBZPOOL?= ${LIBZPOOLDIR}/libzpool${PIE_SUFFIX}.a
LIBZUTILDIR= ${_LIB_OBJTOP}/cddl/lib/libzutil
LIBZUTIL?= ${LIBZUTILDIR}/libzutil${PIE_SUFFIX}.a
LIBTPOOLDIR= ${_LIB_OBJTOP}/cddl/lib/libtpool
# OFED support
LIBCXGB4DIR= ${_LIB_OBJTOP}/lib/ofed/libcxgb4
LIBIBCMDIR= ${_LIB_OBJTOP}/lib/ofed/libibcm
LIBIBMADDIR= ${_LIB_OBJTOP}/lib/ofed/libibmad
LIBIBNETDISCDIR=${_LIB_OBJTOP}/lib/ofed/libibnetdisc
LIBIBUMADDIR= ${_LIB_OBJTOP}/lib/ofed/libibumad
LIBIBVERBSDIR= ${_LIB_OBJTOP}/lib/ofed/libibverbs
LIBIRDMADIR= ${_LIB_OBJTOP}/lib/ofed/libirdma
LIBMLX4DIR= ${_LIB_OBJTOP}/lib/ofed/libmlx4
LIBMLX5DIR= ${_LIB_OBJTOP}/lib/ofed/libmlx5
LIBRDMACMDIR= ${_LIB_OBJTOP}/lib/ofed/librdmacm
LIBOSMCOMPDIR= ${_LIB_OBJTOP}/lib/ofed/complib
LIBOPENSMDIR= ${_LIB_OBJTOP}/lib/ofed/libopensm
LIBOSMVENDORDIR=${_LIB_OBJTOP}/lib/ofed/libvendor
LIBDIALOGDIR= ${_LIB_OBJTOP}/gnu/lib/libdialog
LIBSSPDIR= ${_LIB_OBJTOP}/lib/libssp
LIBSSP_NONSHAREDDIR= ${_LIB_OBJTOP}/lib/libssp_nonshared
LIBASN1DIR= ${_LIB_OBJTOP}/kerberos5/lib/libasn1
LIBGSSAPI_KRB5DIR= ${_LIB_OBJTOP}/kerberos5/lib/libgssapi_krb5
LIBGSSAPI_NTLMDIR= ${_LIB_OBJTOP}/kerberos5/lib/libgssapi_ntlm
LIBGSSAPI_SPNEGODIR= ${_LIB_OBJTOP}/kerberos5/lib/libgssapi_spnego
LIBHDBDIR= ${_LIB_OBJTOP}/kerberos5/lib/libhdb
LIBHEIMBASEDIR= ${_LIB_OBJTOP}/kerberos5/lib/libheimbase
LIBHEIMIPCCDIR= ${_LIB_OBJTOP}/kerberos5/lib/libheimipcc
LIBHEIMIPCSDIR= ${_LIB_OBJTOP}/kerberos5/lib/libheimipcs
LIBHEIMNTLMDIR= ${_LIB_OBJTOP}/kerberos5/lib/libheimntlm
LIBHX509DIR= ${_LIB_OBJTOP}/kerberos5/lib/libhx509
LIBKADM5CLNTDIR= ${_LIB_OBJTOP}/kerberos5/lib/libkadm5clnt
LIBKADM5SRVDIR= ${_LIB_OBJTOP}/kerberos5/lib/libkadm5srv
LIBKAFS5DIR= ${_LIB_OBJTOP}/kerberos5/lib/libkafs5
LIBKDCDIR= ${_LIB_OBJTOP}/kerberos5/lib/libkdc
LIBKRB5DIR= ${_LIB_OBJTOP}/kerberos5/lib/libkrb5
LIBROKENDIR= ${_LIB_OBJTOP}/kerberos5/lib/libroken
LIBWINDDIR= ${_LIB_OBJTOP}/kerberos5/lib/libwind
LIBATF_CDIR= ${_LIB_OBJTOP}/lib/atf/libatf-c
LIBATF_CXXDIR= ${_LIB_OBJTOP}/lib/atf/libatf-c++
LIBGMOCKDIR= ${_LIB_OBJTOP}/lib/googletest/gmock
LIBGMOCK_MAINDIR= ${_LIB_OBJTOP}/lib/googletest/gmock_main
LIBGTESTDIR= ${_LIB_OBJTOP}/lib/googletest/gtest
LIBGTEST_MAINDIR= ${_LIB_OBJTOP}/lib/googletest/gtest_main
LIBALIASDIR= ${_LIB_OBJTOP}/lib/libalias/libalias
LIBBLACKLISTDIR= ${_LIB_OBJTOP}/lib/libblacklist
LIBBLOCKSRUNTIMEDIR= ${_LIB_OBJTOP}/lib/libblocksruntime
LIBBSNMPDIR= ${_LIB_OBJTOP}/lib/libbsnmp/libbsnmp
LIBCASPERDIR= ${_LIB_OBJTOP}/lib/libcasper/libcasper
LIBCAP_DNSDIR= ${_LIB_OBJTOP}/lib/libcasper/services/cap_dns
LIBCAP_GRPDIR= ${_LIB_OBJTOP}/lib/libcasper/services/cap_grp
LIBCAP_NETDIR= ${_LIB_OBJTOP}/lib/libcasper/services/cap_net
LIBCAP_PWDDIR= ${_LIB_OBJTOP}/lib/libcasper/services/cap_pwd
LIBCAP_SYSCTLDIR= ${_LIB_OBJTOP}/lib/libcasper/services/cap_sysctl
LIBCAP_SYSLOGDIR= ${_LIB_OBJTOP}/lib/libcasper/services/cap_syslog
LIBCBORDIR= ${_LIB_OBJTOP}/lib/libcbor
LIBBSDXMLDIR= ${_LIB_OBJTOP}/lib/libexpat
LIBFIDO2DIR= ${_LIB_OBJTOP}/lib/libfido2
LIBKVMDIR= ${_LIB_OBJTOP}/lib/libkvm
LIBPTHREADDIR= ${_LIB_OBJTOP}/lib/libthr
LIBMDIR= ${_LIB_OBJTOP}/lib/msun
LIBFORMWDIR= ${_LIB_OBJTOP}/lib/ncurses/form
LIBMENUWDIR= ${_LIB_OBJTOP}/lib/ncurses/menu
LIBNCURSESWDIR= ${_LIB_OBJTOP}/lib/ncurses/ncurses
LIBTINFOWDIR= ${_LIB_OBJTOP}/lib/ncurses/tinfo
LIBPANELWDIR= ${_LIB_OBJTOP}/lib/ncurses/panel
LIBCRYPTODIR= ${_LIB_OBJTOP}/secure/lib/libcrypto
LIBSPLDIR= ${_LIB_OBJTOP}/cddl/lib/libspl
LIBSSHDIR= ${_LIB_OBJTOP}/secure/lib/libssh
LIBSSLDIR= ${_LIB_OBJTOP}/secure/lib/libssl
LIBTEKENDIR= ${_LIB_OBJTOP}/sys/teken/libteken
LIBEGACYDIR= ${_LIB_OBJTOP}/tools/build
LIBLNDIR= ${_LIB_OBJTOP}/usr.bin/lex/lib
LIBTERMCAPWDIR= ${LIBTINFOWDIR}
.-include <site.src.libnames.mk>
# Default other library directories to lib/libNAME.
.for lib in ${_LIBRARIES}
LIB${lib:tu}DIR?= ${OBJTOP}/lib/lib${lib}
.endfor
# Validate that listed LIBADD are valid.
.for _l in ${LIBADD}
.if empty(_LIBRARIES:M${_l})
_BADLIBADD+= ${_l}
.endif
.endfor
.if !empty(_BADLIBADD)
.error ${.CURDIR}: Invalid LIBADD used which may need to be added to ${_this:T}: ${_BADLIBADD}
.endif
# Sanity check that libraries are defined here properly when building them.
.if defined(LIB) && ${_LIBRARIES:M${LIB}} != ""
.if !empty(LIBADD) && \
(!defined(_DP_${LIB}) || ${LIBADD:O:u} != ${_DP_${LIB}:O:u})
.error ${.CURDIR}: Missing or incorrect _DP_${LIB} entry in ${_this:T}. Should match LIBADD for ${LIB} ('${LIBADD}' vs '${_DP_${LIB}}')
.endif
# Note that OBJTOP is not yet defined here, but for the purpose of this check
# that is fine, as it resolves to the SRC directory.
.if !defined(LIB${LIB:tu}DIR) || !exists(${SRCTOP}/${LIB${LIB:tu}DIR:S,^${OBJTOP}/,,})
.error ${.CURDIR}: Missing or incorrect value for LIB${LIB:tu}DIR in ${_this:T}: ${LIB${LIB:tu}DIR:S,^${OBJTOP}/,,}
.endif
.if ${_INTERNALLIBS:M${LIB}} != "" && !defined(LIB${LIB:tu})
.error ${.CURDIR}: Missing value for LIB${LIB:tu} in ${_this:T}. Likely should be: LIB${LIB:tu}?= $${LIB${LIB:tu}DIR}/lib${LIB}.a
.endif
.endif
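# A sketch (hypothetical library "frob") of what the checks above expect
# when a new internal library is added to this file:
#	_INTERNALLIBS+=	frob
#	_DP_frob=	util		# must match lib/libfrob's LIBADD
#	LIBFROBDIR=	${_LIB_OBJTOP}/lib/libfrob
#	LIBFROB?=	${LIBFROBDIR}/libfrob${PIE_SUFFIX}.a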
.endif # !target(__<src.libnames.mk>__)
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 5f834d5cc7c4..e4e770026b57 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 2.2.0
-Release: rc1
+Release: rc3
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 6.3
+Linux-Maximum: 6.4
Linux-Minimum: 3.10
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index 9568d2bbfe38..4b9921d47b81 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,9338 +1,9413 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
* Copyright (c) 2023, Klara Inc.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <getopt.h>
#include <openssl/evp.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
+#include <sys/brt.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <libnvpair.h>
#include <libzutil.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
DMU_OT_ZAP_OTHER : \
(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
/* Some platforms require part of inode IDs to be remapped */
#ifdef __APPLE__
#define ZDB_MAP_OBJECT_ID(obj) INO_XNUTOZFS(obj, 2)
#else
#define ZDB_MAP_OBJECT_ID(obj) (obj)
#endif
static const char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern uint_t zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern boolean_t spa_mode_readable_spacemaps;
extern uint_t zfs_reconstruct_indirect_combinations_max;
extern uint_t zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
static uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
typedef struct zopt_object_range {
uint64_t zor_obj_start;
uint64_t zor_obj_end;
uint64_t zor_flags;
} zopt_object_range_t;
static zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
#define ZOR_FLAG_PLAIN_FILE 0x0001
#define ZOR_FLAG_DIRECTORY 0x0002
#define ZOR_FLAG_SPACE_MAP 0x0004
#define ZOR_FLAG_ZAP 0x0008
#define ZOR_FLAG_ALL_TYPES -1
#define ZOR_SUPPORTED_FLAGS (ZOR_FLAG_PLAIN_FILE | \
ZOR_FLAG_DIRECTORY | \
ZOR_FLAG_SPACE_MAP | \
ZOR_FLAG_ZAP)
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
typedef struct sublivelist_verify {
/* FREEs that haven't yet been matched to an ALLOC, in one sub-livelist */
zfs_btree_t sv_pair;
/* ALLOCs without a matching FREE; accumulates across sub-livelists */
zfs_btree_t sv_leftover;
} sublivelist_verify_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = larg;
const blkptr_t *r = rarg;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev, r_dva0_vdev;
l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev < r_dva0_vdev)
return (-1);
else if (l_dva0_vdev > r_dva0_vdev)
return (+1);
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset;
uint64_t r_dva0_offset;
l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset < r_dva0_offset) {
return (-1);
} else if (l_dva0_offset > r_dva0_offset) {
return (+1);
}
/*
* Since we're storing blkptrs without cancelling FREE/ALLOC pairs,
* it's possible the offsets are equal. In that case, sort by txg
*/
if (l->blk_birth < r->blk_birth) {
return (-1);
} else if (l->blk_birth > r->blk_birth) {
return (+1);
}
return (0);
}
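/*
* Note (illustrative): because this comparator keys only on dva[0] and
* birth txg, a FREE entry and its matching ALLOC for the same block
* compare equal; that is what lets zfs_btree_find() pair them up in
* sublivelist_verify_blkptr() below.
*/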
typedef struct sublivelist_verify_block {
dva_t svb_dva;
/*
* We need this to check if the block marked as allocated
* in the livelist was freed (and potentially reallocated)
* in the metaslab spacemaps at a later TXG.
*/
uint64_t svb_allocated_txg;
} sublivelist_verify_block_t;
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* The refcount is set to 1 when we encounter the first
* FREE entry for the svbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (i.e. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg = bp->blk_birth
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
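/*
* A worked trace (illustrative) of the pairing above, for one block pointer
* seen four times in a sub-livelist:
*
*	FREE	-> node created in sv_pair, svbr_refcnt = 1
*	FREE	-> svbr_refcnt = 2
*	ALLOC	-> svbr_refcnt = 1
*	ALLOC	-> svbr_refcnt = 0, node removed (fully paired)
*
* Any node still in sv_pair when the iteration ends is an unmatched FREE
* and is reported by sublivelist_verify_func() below.
*/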
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare, NULL,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
(void) args;
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
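/*
* unflushed_iter_cb_arg_t above is a small closure shim: space_map_iterate()
* only passes (sme, arg) to its callback, so the per-log txg and the
* caller's real callback are carried through arg and re-expanded in
* iterate_through_spacemap_logs_cb().
*/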
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb = {{{0}}};
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for the spacemap entry in the livelist entries.
* Then, look for other livelist entries that fall within the range
* of the spacemap entry, as it may have been condensed.
*/
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from the sv_leftover tree to mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly, double FREEs are allowed as well, but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we have walked
* through all the metaslabs in the concrete vdevs, then we have
* segments in the livelists that belong to indirect vdevs and are
* marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
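/*
* This is the validation run by the -y (--livelist) option documented in
* usage() below, e.g. (hypothetical pool name):
*
*	zdb -y tank
*/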
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
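/*
* These strings supply the allocator's default $UMEM_DEBUG and $UMEM_LOGGING
* settings. A sketch of overriding them from the environment instead
* (illustrative values, assuming the platform's libumem honors them):
*
*	UMEM_DEBUG=audit UMEM_LOGGING=transaction zdb ...
*/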
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] [-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
"\t%s -B [-e [-V] [-p <path> ...]] [-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>] <poolname>/<objset id> [<backupflags>]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>] [<poolname>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O [-K <key>] <dataset> <path>\n"
"\t%s -r [-K <key>] <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files\n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b --block-stats "
"block statistics\n");
(void) fprintf(stderr, " -B --backup "
"backup stream\n");
(void) fprintf(stderr, " -c --checksum "
"checksum all metadata (twice for all data) blocks\n");
(void) fprintf(stderr, " -C --config "
"config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d --datasets "
"dataset(s)\n");
(void) fprintf(stderr, " -D --dedup-stats "
"dedup statistics\n");
(void) fprintf(stderr, " -E --embedded-block-pointer=INTEGER\n"
" decode and display block "
"from an embedded block pointer\n");
(void) fprintf(stderr, " -h --history "
"pool history\n");
(void) fprintf(stderr, " -i --intent-logs "
"intent logs\n");
(void) fprintf(stderr, " -l --label "
"read label contents\n");
(void) fprintf(stderr, " -k --checkpointed-state "
"examine the checkpointed state of the pool\n");
(void) fprintf(stderr, " -L --disable-leak-tracking "
"disable leak tracking (do not load spacemaps)\n");
(void) fprintf(stderr, " -m --metaslabs "
"metaslabs\n");
(void) fprintf(stderr, " -M --metaslab-groups "
"metaslab groups\n");
(void) fprintf(stderr, " -O --object-lookups "
"perform object lookups by path\n");
(void) fprintf(stderr, " -r --copy-object "
"copy an object by path to file\n");
(void) fprintf(stderr, " -R --read-block "
"read and display block from a device\n");
(void) fprintf(stderr, " -s --io-stats "
"report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S --simulate-dedup "
"simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v --verbose "
"verbose (applies to all others)\n");
(void) fprintf(stderr, " -y --livelist "
"perform livelist and metaslab validation on any livelists being "
"deleted\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A --ignore-assertions "
"ignore assertions (-A), enable panic recovery (-AA) or both "
"(-AAA)\n");
(void) fprintf(stderr, " -e --exported "
"pool is exported/destroyed/has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F --automatic-rewind "
"attempt automatic rewind within safe range of transaction "
"groups\n");
(void) fprintf(stderr, " -G --dump-debug-msg "
"dump zfs_dbgmsg buffer before exiting\n");
(void) fprintf(stderr, " -I --inflight=INTEGER "
"specify the maximum number of checksumming I/Os "
"[default is 200]\n");
(void) fprintf(stderr, " -K --key=KEY "
"decryption key for encrypted dataset\n");
(void) fprintf(stderr, " -o --option=\"OPTION=INTEGER\" "
"set global variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p --path==PATH "
"use one or more with -e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P --parseable "
"print numbers in parseable form\n");
(void) fprintf(stderr, " -q --skip-label "
"don't print label contents\n");
(void) fprintf(stderr, " -t --txg=INTEGER "
"highest txg to use when searching for uberblocks\n");
(void) fprintf(stderr, " -u --uberblock "
"uberblock\n");
(void) fprintf(stderr, " -U --cachefile=PATH "
"use alternate cachefile\n");
(void) fprintf(stderr, " -V --verbatim "
"do verbatim import\n");
(void) fprintf(stderr, " -x --dump-blocks=PATH "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X --extreme-rewind "
"attempt extreme rewind (does not work with dataset)\n");
(void) fprintf(stderr, " -Y --all-reconstruction "
"attempt all reconstruction combinations for split blocks\n");
(void) fprintf(stderr, " -Z --zstd-headers "
"show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
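/*
* Example invocations (hypothetical pool/device names):
*
*	zdb -dd tank/home 0:-1:f	dump only ZFS file objects, any range
*	zdb -l /dev/da0p3		print the label of a single device
*/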
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
(void) fflush(stdout);
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) size;
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) size;
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] == 0)
continue;
if (histo[i] > max)
max = histo[i];
if (i > maxidx)
maxidx = i;
if (i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
if (data == NULL)
kmem_free(arr, oursize);
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attr.za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
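/*
* A sample of the per-entry line printed above (values illustrative):
*
*	[    42] A range: 0000100000-0000140000 size: 040000 vdev: 000000 words: 1
*
* 'A'/'F' is ALLOC/FREE, the range is [entry_off, entry_off + entry_run),
* and "words" records whether the on-disk entry used one or two words.
*/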
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa, boolean_t show_special)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
metaslab_class_t *smc = spa_special_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || (mg->mg_class != mc &&
(!show_special || mg->mg_class != smc)))
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
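/*
 * Illustrative (hypothetical) numbers: rL = 200G, rP = 100G,
 * rD = 120G and D = 40G give dedup = 3.00, compress = 2.00,
 * copies = 1.20, and a combined ratio of 3.00 * 2.00 / 1.20 = 5.00.
 */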
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
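/*
 * dspace and mspace are the total on-disk and in-core footprints of
 * this DDT object; the sizes printed below are per-entry averages.
 */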
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
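/*
 * resid is the tail of the buffer that did not unpack into a whole
 * record; rewind so the next read starts there.
 */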
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
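/*
 * Each indirect level multiplies the block id by the number of block
 * pointers per indirect block, 2^(dn_indblkshift - SPA_BLKPTRSHIFT);
 * the result is then scaled by the data block size (dn_datablkszsec
 * 512-byte sectors). For example, assuming the common 128K indirect
 * and data blocks (1024 blkptrs per indirect block, 256 sectors per
 * data block), level 1 blkid 2 maps to (2 << 10) * 256 << 9 =
 * 0x10000000.
 */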
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
abd_t *pabd;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
return;
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
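/*
 * The compact form built below is one vdev:offset:asize triple per
 * DVA (offset and asize in hex), followed by logical/physical sizes,
 * fill count and birth txgs, e.g. (hypothetical):
 *	0:8000:2000 20000L/2000P F=1 B=100/100 cksum=...
 */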
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" cksum=%016llx:%016llx:%016llx:%016llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
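/*
 * Print this block pointer, then, for interior blocks, read it and recurse
 * into each child, accumulating the children's fill counts to check the
 * invariant that an indirect block's fill equals the sum of its children's.
 */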
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (int j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (nice) >= NN_NUMBUF_SZ, "nice truncated");
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
_Static_assert(sizeof (used) >= NN_NUMBUF_SZ, "used truncated");
_Static_assert(sizeof (compressed) >= NN_NUMBUF_SZ,
"compressed truncated");
_Static_assert(sizeof (uncompressed) >= NN_NUMBUF_SZ,
"uncompressed truncated");
_Static_assert(sizeof (unique) >= NN_NUMBUF_SZ, "unique truncated");
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t attr;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
int len;
dmu_objset_name(os, osname);
len = snprintf(buf, sizeof (buf), "%s#%s", osname,
attr.za_name);
VERIFY3S(len, <, ZFS_MAX_DATASET_NAME_LEN);
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
static void
dump_blkptr_list(dsl_deadlist_t *dl, const char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
_Static_assert(sizeof (entries) >= NN_NUMBUF_SZ, "entries truncated");
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) putchar('\n');
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
* It's possible that the dataset's uncomp space is larger than the
* livelist's because livelists do not track embedded block pointers.
*/
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static char *key_material = NULL;
static boolean_t
zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
{
uint64_t keyformat, salt, iters;
int i;
unsigned char c;
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), sizeof (uint64_t),
1, &keyformat));
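/*
 * A hex key supplies the WRAPPING_KEY_LEN-byte wrapping key directly,
 * two hex digits per byte; a passphrase is stretched into the key
 * with PBKDF2-HMAC-SHA1 using the salt and iteration count stored
 * alongside the crypto object.
 */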
switch (keyformat) {
case ZFS_KEYFORMAT_HEX:
for (i = 0; i < WRAPPING_KEY_LEN * 2; i += 2) {
if (!isxdigit(key_material[i]) ||
!isxdigit(key_material[i+1]))
return (B_FALSE);
if (sscanf(&key_material[i], "%02hhx", &c) != 1)
return (B_FALSE);
key_out[i / 2] = c;
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
sizeof (uint64_t), 1, &salt));
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
sizeof (uint64_t), 1, &iters));
if (PKCS5_PBKDF2_HMAC_SHA1(key_material, strlen(key_material),
((uint8_t *)&salt), sizeof (uint64_t), iters,
WRAPPING_KEY_LEN, key_out) != 1)
return (B_FALSE);
break;
default:
fatal("no support for key format %u\n",
(unsigned int) keyformat);
}
return (B_TRUE);
}
static char encroot[ZFS_MAX_DATASET_NAME_LEN];
static boolean_t key_loaded = B_FALSE;
static void
zdb_load_key(objset_t *os)
{
dsl_pool_t *dp;
dsl_dir_t *dd, *rdd;
uint8_t key[WRAPPING_KEY_LEN];
uint64_t rddobj;
int err;
dp = spa_get_dsl(os->os_spa);
dd = os->os_dsl_dataset->ds_dir;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, sizeof (uint64_t), 1, &rddobj));
VERIFY0(dsl_dir_hold_obj(dd->dd_pool, rddobj, NULL, FTAG, &rdd));
dsl_dir_name(rdd, encroot);
dsl_dir_rele(rdd, FTAG);
if (!zdb_derive_key(dd, key))
fatal("couldn't derive encryption key");
dsl_pool_config_exit(dp, FTAG);
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_UNAVAILABLE);
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args;
crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)key, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
NULL, crypto_args, &dcp));
err = spa_keystore_load_wkey(encroot, dcp, B_FALSE);
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err != 0)
fatal(
"couldn't load encryption key for %s: %s",
encroot, err == ZFS_ERR_CRYPTO_NOTSUP ?
"crypto params not supported" : strerror(err));
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_AVAILABLE);
printf("Unlocked encryption root: %s\n", encroot);
key_loaded = B_TRUE;
}
static void
zdb_unload_key(void)
{
if (!key_loaded)
return;
VERIFY0(spa_keystore_unload_wkey(encroot));
key_loaded = B_FALSE;
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, const void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
if (dump_opt['K']) {
/* decryption requested, try to load keys */
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset "
"'%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
/* succeeds or dies */
zdb_load_key(*osp);
/* release it all */
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
}
int ds_hold_flags = key_loaded ? DS_HOLD_FLAG_DECRYPT : 0;
err = dmu_objset_hold_flags(path, ds_hold_flags, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS &&
(key_loaded || !(*osp)->os_encrypted)) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele_flags(dmu_objset_ds(*osp),
ds_hold_flags, tag);
*osp = NULL;
}
}
sa_os = *osp;
return (err);
}
static void
close_objset(objset_t *os, const void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele_flags(dmu_objset_ds(os),
key_loaded ? DS_HOLD_FLAG_DECRYPT : 0, tag);
sa_attr_table = NULL;
sa_os = NULL;
zdb_unload_key();
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
* Print uid or gid information.
* For a normal POSIX id, just the id is printed in decimal format.
* For CIFS files with a FUID, the fuid is printed in hex followed by
* the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
const char *domain =
zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
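/*
 * The first pass just counts the entries; nvlist_next_nvpair()
 * returns NULL at the end, leaving elem reset for the printing
 * pass below.
 */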
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
boolean_t can_print = !dump_opt['P'];
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (!isprint(value[idx])) {
can_print = B_FALSE;
break;
}
}
for (idx = 0; idx < cnt; ++idx) {
if (can_print)
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_symlink_size >= sizeof (linktarget)) {
(void) printf("symlink size %d is too large\n",
sa_symlink_size);
return;
}
linktarget[sa_symlink_size] = '\0';
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
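* In that case flags == (ZOR_FLAG_ALL_TYPES & ~ZOR_FLAG_PLAIN_FILE),
* so OR-ing ZOR_SUPPORTED_FLAGS back in restores ZOR_FLAG_ALL_TYPES
* and types without a dedicated flag (this default case) still match.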
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
_Static_assert(sizeof (iblk) >= NN_NUMBUF_SZ, "iblk truncated");
_Static_assert(sizeof (dblk) >= NN_NUMBUF_SZ, "dblk truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ, "lsize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ, "asize truncated");
_Static_assert(sizeof (bonus_size) >= NN_NUMBUF_SZ,
"bonus_size truncated");
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (!key_loaded && os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
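/*
 * %full is the filled fraction of the object's logical size
 * (doi_max_offset). For the meta dnode (object 0), doi_fill_count
 * counts dnodes rather than blocks, so scale by DNODES_PER_BLOCK.
 */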
(void) snprintf(fill, sizeof (fill), "%6.2f", 100.0 *
doi.doi_fill_count * doi.doi_data_block_size / (object == 0 ?
DNODES_PER_BLOCK : 1) / doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (key_loaded ||
(!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type))) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5) {
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
DN_SPILL_BLKPTR(dn->dn_phys), B_FALSE);
(void) printf("\nSpill block: %s\n", blkbuf);
}
dump_indirect(dn);
}
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (segsize) >= NN_NUMBUF_SZ,
"segsize truncated");
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *const objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
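* Example (assuming the usual zdb flag letters, e.g. f = plain file,
* d = directory): "128:1024:df" selects objects 128 through 1024 and
* shows only directories and plain files.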
*/
static int
parse_object_range(char *range, zopt_object_range_t *zor, const char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = ZDB_MAP_OBJECT_ID(zor->zor_obj_end);
out:
free(dup);
return (rc);
}
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
_Static_assert(sizeof (numbuf) >= NN_NUMBUF_SZ, "numbuf truncated");
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, ctime(&timestamp));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
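/*
 * Read a pool cache file (typically /etc/zfs/zpool.cache, though the
 * path is whatever the caller passed) into memory, unpack it, and dump
 * the resulting nvlist.
 */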
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
const char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
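/*
 * Print a breakdown of the label nvlist's space usage by value type.
 * The encoded size of an empty nvlist is measured first ("noise") and
 * subtracted from each per-type total so only payload bytes are
 * attributed to each type; whatever remains is charged to nvlist
 * framing overhead.
 */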
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
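/*
 * AVL comparator for cksum_record_t: compare the checksum words in
 * order, so identical label or uberblock payloads collapse into a
 * single tree node whose labels[] flags record where each copy was
 * seen.
 */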
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference = 0;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(const char *prefix, const cksum_record_t *rec)
{
fputs(prefix, stdout);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
putchar('\n');
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
uint64_t label_offset;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
boolean_t cksum_valid;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d %s\n", l,
label->cksum_valid ? "" : "(Bad label cksum)");
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(const l2arc_log_blkptr_t *lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps->lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps->lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps->lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE(lbps->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE(lbps->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS(lbps->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM(lbps->lbp_prop));
(void) printf("|\n\n");
}
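/*
 * Walk the chain of L2ARC log blocks, starting from the two head block
 * pointers in the device header: verify each block's checksum,
 * decompress it if necessary, and accumulate the count and aligned
 * size into 'rebuild' so the caller can cross-check the header's own
 * accounting.
 */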
static void
dump_l2arc_log_blocks(int fd, const l2arc_dev_hdr_phys_t *l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
abd_t *abd;
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr->dh_evict;
dev.l2ad_start = l2dhdr->dh_start;
dev.l2ad_end = l2dhdr->dh_end;
if (l2dhdr->dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(&lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
if (zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &this_lb,
asize, sizeof (this_lb), NULL) != 0) {
(void) printf("L2ARC block decompression "
"failed\n");
abd_free(abd);
goto out;
}
abd_free(abd);
break;
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(&lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr->dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
out:
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr = {0}, rebuild = {0};
int error = B_FALSE;
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, &l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the device header may be less than what zdb reports via
* dump_l2arc_log_blocks(), which emulates l2arc_rebuild(). This happens
* because dump_l2arc_log_blocks() lacks the memory pressure valve that
* l2arc_rebuild() has. Thus, if we are on a system with low memory,
* l2arc_rebuild() will exit prematurely and dh_lb_asize and dh_lb_count
* will be lower to begin with than what exists on the device. This is
* normal and zdb should not exit with an error. The opposite case
* should never happen, though: the values reported in the header should
* never be higher than what dump_l2arc_log_blocks() and l2arc_rebuild()
* report. If they are, there is a leak in the accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing each
* component's object and the remaining path until we find the object
* for the final component.
*/
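/*
 * For example (illustrative only), resolving "a/b/c" looks up "a" in
 * the directory ZAP of 'obj', then recurses with the child object and
 * the remaining path "b/c" until "c" yields the final object.
 */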
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
zfs_fallthrough;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
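/*
 * A usage sketch (assuming this is reached via zdb's -O option, whose
 * parsing is not shown in this excerpt):
 *   zdb -O pool/fs dir/file
 * opens the dataset "pool/fs" and dumps the object backing "dir/file".
 */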
static int
dump_backup_bytes(objset_t *os, void *buf, int len, void *arg)
{
const char *p = (const char *)buf;
ssize_t nwritten;
(void) os;
(void) arg;
/* Write the data out, handling short writes and signals. */
while ((nwritten = write(STDOUT_FILENO, p, len)) < len) {
if (nwritten < 0) {
if (errno == EINTR)
continue;
return (errno);
}
p += nwritten;
len -= nwritten;
}
return (0);
}
static void
dump_backup(const char *pool, uint64_t objset_id, const char *flagstr)
{
boolean_t embed = B_FALSE;
boolean_t large_block = B_FALSE;
boolean_t compress = B_FALSE;
boolean_t raw = B_FALSE;
const char *c;
for (c = flagstr; c != NULL && *c != '\0'; c++) {
switch (*c) {
case 'e':
embed = B_TRUE;
break;
case 'L':
large_block = B_TRUE;
break;
case 'c':
compress = B_TRUE;
break;
case 'w':
raw = B_TRUE;
break;
default:
fprintf(stderr, "dump_backup: invalid flag "
"'%c'\n", *c);
return;
}
}
if (isatty(STDOUT_FILENO)) {
fprintf(stderr, "dump_backup: stream cannot be written "
"to a terminal\n");
return;
}
offset_t off = 0;
dmu_send_outparams_t out = {
.dso_outfunc = dump_backup_bytes,
.dso_dryrun = B_FALSE,
};
int err = dmu_send_obj(pool, objset_id, /* fromsnap */0, embed,
large_block, compress, raw, /* saved */ B_FALSE, STDOUT_FILENO,
&off, &out);
if (err != 0) {
fprintf(stderr, "dump_backup: dmu_send_obj: %s\n",
strerror(err));
return;
}
}
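/*
 * A usage sketch (the flag letters come from the switch above; the
 * command-line spelling via -B is an assumption, as option parsing is
 * not shown in this excerpt):
 *   zdb -B pool/1234 cew > stream.zsend
 * would emit objset 1234 as a compressed, embedded-block, raw send
 * stream on stdout, which must not be a terminal.
 */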
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1)
return (errno);
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
(void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
(void) close(fd);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
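/*
 * Verify a vdev label's embedded checksum: the trailing zio_eck_t of
 * the vl_vdev_phys region holds the expected checksum, and the label's
 * on-disk offset is folded into the verifier, so a label copied to the
 * wrong offset will fail verification.
 */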
static boolean_t
label_cksum_valid(vdev_label_t *label, uint64_t offset)
{
zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
zio_cksum_t expected_cksum;
zio_cksum_t actual_cksum;
zio_cksum_t verifier;
zio_eck_t *eck;
int byteswap;
void *data = (char *)label + offsetof(vdev_label_t, vl_vdev_phys);
eck = (zio_eck_t *)((char *)(data) + VDEV_PHYS_SIZE) - 1;
offset += offsetof(vdev_label_t, vl_vdev_phys);
ZIO_SET_CHECKSUM(&verifier, offset, 0, 0, 0);
byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck->zec_cksum;
eck->zec_cksum = verifier;
abd_t *abd = abd_get_from_buf(data, VDEV_PHYS_SIZE);
ci->ci_func[byteswap](abd, VDEV_PHYS_SIZE, NULL, &actual_cksum);
abd_free(abd);
if (byteswap)
byteswap_uint64_array(&expected_cksum, sizeof (zio_cksum_t));
if (ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (B_TRUE);
return (B_FALSE);
}
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS] = {{{{0}}}};
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
/*
* Check if we were given an absolute path and use it as is.
* Otherwise, if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Verify label cksum
* 3. Unpack the configuration and insert in config tree.
* 4. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
label->label_offset = vdev_label_offset(psize, l, 0);
if (pread64(fd, &label->label, sizeof (label->label),
label->label_offset) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
label->cksum_valid = label_cksum_valid(&label->label,
label->label_offset);
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device, note it so we read its L2ARC header. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if present.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
static int
dump_one_objset(const char *dsname, void *arg)
{
(void) arg;
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0)
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS]++;
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
+typedef struct zdb_brt_entry {
+ dva_t zbre_dva;
+ uint64_t zbre_refcount;
+ avl_node_t zbre_node;
+} zdb_brt_entry_t;
+
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
+ uint64_t zcb_clone_asize;
+ uint64_t zcb_clone_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
+ avl_tree_t zcb_brt;
+ boolean_t zcb_brt_is_active;
} zdb_cb_t;
/* test if two DVA offsets from same vdev are within the same metaslab */
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
const char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine creates fixed-column output of three different
* histograms showing, for each power-of-two block size from 512 up to
* 2^SPA_MAX_FOR_16M, the count, length, and cumulative length of the
* psize, lsize, and asize blocks.
*
* All three types of blocks are listed on a single line
*
* By default the table is printed in nicenumber format (e.g. 123K) but
* if the '-P' parameter is specified then the full raw number (parseable)
* is printed out.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer that allows us to convert a number into
* a string using zdb_nicenumber to allow either raw or human
* readable numbers to be output.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Setup the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
+ if (zcb->zcb_brt_is_active && brt_maybe_exists(zcb->zcb_spa, bp)) {
+ /*
+ * Cloned blocks are special. We need to count them, so we can
+ * later uncount them when reporting leaked space, and we must
+ * only claim them once.
+ *
+ * To do this, we keep our own in-memory BRT. For each block
+ * we haven't seen before, we look it up in the real BRT and
+ * if it's there, we note it and its refcount, then proceed as
+ * normal. If we see the block again, we count it as a clone
+ * and then give it no further consideration.
+ */
+ zdb_brt_entry_t zbre_search, *zbre;
+ avl_index_t where;
+
+ zbre_search.zbre_dva = bp->blk_dva[0];
+ zbre = avl_find(&zcb->zcb_brt, &zbre_search, &where);
+ if (zbre != NULL) {
+ zcb->zcb_clone_asize += BP_GET_ASIZE(bp);
+ zcb->zcb_clone_blocks++;
+
+ zbre->zbre_refcount--;
+ if (zbre->zbre_refcount == 0) {
+ avl_remove(&zcb->zcb_brt, zbre);
+ umem_free(zbre, sizeof (zdb_brt_entry_t));
+ }
+ return;
+ }
+
+ uint64_t crefcnt = brt_entry_get_refcount(zcb->zcb_spa, bp);
+ if (crefcnt > 0) {
+ zbre = umem_zalloc(sizeof (zdb_brt_entry_t),
+ UMEM_NOFAIL);
+ zbre->zbre_dva = bp->blk_dva[0];
+ zbre->zbre_refcount = crefcnt;
+ avl_insert(&zcb->zcb_brt, zbre, where);
+ }
+ }
+
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
uint64_t kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
uint64_t sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
_Static_assert(sizeof (buf) >= NN_NUMBUF_SZ, "buf truncated");
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4"PRIu64"MB/s) "
"estimated time remaining: "
"%"PRIu64"hr %02"PRIu64"min %02"PRIu64"sec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
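/*
 * zdb only claims and frees space; it never allocates from the pool,
 * so the metaslab class is pointed at an ops vector with a NULL
 * allocator (see zdb_leak_init() below). Any stray allocation attempt
 * would then fail immediately rather than silently disturb the
 * repurposed ms_allocatable trees.
 */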
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset, (void) arg;
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT0(range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb = {0};
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
ASSERT(error == ENOENT);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab to which it belongs.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference of the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
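/*
 * Spacemap-log replay callback: for entries newer than the metaslab's
 * unflushed txg, add ranges matching *uic_maptype to ms_allocatable
 * and remove ranges of the opposite type, so the tree ends up
 * reflecting the requested view (SM_ALLOC or SM_FREE).
 */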
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries that has the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
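/*
 * Prepare the indirect vdevs for leak detection: load their obsolete
 * counts and populate their ms_allocatable trees from the entries of
 * their indirect mappings.
 */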
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable trees
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
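/*
 * Prepare the pool for leak detection: take over the metaslab class
 * ops, load the ms_allocatable trees with allocated (rather than
 * free) segments, and exclude any checkpointed blocks.
 */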
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* On load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
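/*
 * Compare the obsolete counts recorded for an indirect vdev's mapping
 * entries against the obsolete space observed during traversal and
 * report any mismatch. Returns B_TRUE if a definite leak was found.
 */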
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient, but it makes correctness
* easy to verify.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1ULL << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
obsolete_bytes += 1ULL << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping"
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
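/*
 * Report any segments left in the ms_allocatable trees after
 * traversal (allocated blocks that were never claimed) and release
 * the resources acquired by zdb_leak_init(). Returns B_TRUE if any
 * leaks were found.
 */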
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed.
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t attr;
dsl_deadlist_t ll;
/* NULL out dl_os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
(void) zap_cursor_advance(&zc)) {
dsl_deadlist_open(&ll, mos, attr.za_first_integer);
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
}
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
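+/*
+ * AVL comparison function for block reference table (BRT) entries,
+ * ordered by the vdev and offset of their DVAs.
+ */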
+static int
+zdb_brt_entry_compare(const void *zcn1, const void *zcn2)
+{
+ const dva_t *dva1 = &((const zdb_brt_entry_t *)zcn1)->zbre_dva;
+ const dva_t *dva2 = &((const zdb_brt_entry_t *)zcn2)->zbre_dva;
+ int cmp;
+
+ cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
+ if (cmp == 0)
+ cmp = TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2));
+
+ return (cmp);
+}
+
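/*
 * Traverse every block in the pool, accumulating per-type statistics
 * and (unless -L was given) verifying that the block totals match the
 * space maps. Returns 0 on success, 2 if leaks were found, and 3 if
 * traversal errors were seen.
 */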
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t *zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
zcb = umem_zalloc(sizeof (zdb_cb_t), UMEM_NOFAIL);
+ if (spa_feature_is_active(spa, SPA_FEATURE_BLOCK_CLONING)) {
+ avl_create(&zcb->zcb_brt, zdb_brt_entry_compare,
+ sizeof (zdb_brt_entry_t),
+ offsetof(zdb_brt_entry_t, zbre_node));
+ zcb->zcb_brt_is_active = B_TRUE;
+ }
+
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, a reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
zdb_leak_init(spa, zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
bpobj_count_block_cb, zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
bpobj_count_block_cb, zcb, NULL);
}
zdb_claim_removing(spa, zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
zcb, NULL));
}
deleted_livelists_count_blocks(spa, zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
zcb->zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb->zcb_start = zcb->zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb->zcb_haderrors |= err;
if (zcb->zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb->zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb->zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
leaks |= zdb_leak_fini(spa, zcb);
tzb = &zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
- total_found = tzb->zb_asize - zcb->zcb_dedup_asize +
+ total_found =
+ tzb->zb_asize - zcb->zcb_dedup_asize - zcb->zcb_clone_asize +
zcb->zcb_removing_size + zcb->zcb_checkpoint_size;
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
if (tzb->zb_count == 0) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
"bp deduped:", (u_longlong_t)zcb->zcb_dedup_asize,
(u_longlong_t)zcb->zcb_dedup_blocks,
(double)zcb->zcb_dedup_asize / tzb->zb_asize + 1.0);
+ (void) printf("\t%-16s %14llu count: %6llu\n",
+ "bp cloned:", (u_longlong_t)zcb->zcb_clone_asize,
+ (u_longlong_t)zcb->zcb_clone_blocks);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb->zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb->zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb->zcb_embedded_histogram[i],
sizeof (zcb->zcb_embedded_histogram[i]) /
sizeof (zcb->zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_num_entries(vim),
mem, vdev_indirect_mapping_size(vim));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
zfs_blkstat_t *mdstats = umem_zalloc(sizeof (zfs_blkstat_t),
UMEM_NOFAIL);
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
const char *typename;
/* make sure nicenum has enough space */
_Static_assert(sizeof (csize) >= NN_NUMBUF_SZ,
"csize truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ,
"lsize truncated");
_Static_assert(sizeof (psize) >= NN_NUMBUF_SZ,
"psize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ,
"asize truncated");
_Static_assert(sizeof (avg) >= NN_NUMBUF_SZ,
"avg truncated");
_Static_assert(sizeof (gang) >= NN_NUMBUF_SZ,
"gang truncated");
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb->zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb->zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (level != ZB_TOTAL && t < DMU_OT_NUMTYPES &&
(level > 0 || DMU_OT_IS_METADATA(t))) {
mdstats->zb_count += zb->zb_count;
mdstats->zb_lsize += zb->zb_lsize;
mdstats->zb_psize += zb->zb_psize;
mdstats->zb_asize += zb->zb_asize;
mdstats->zb_gangs += zb->zb_gangs;
}
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb->zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
zdb_nicenum(mdstats->zb_count, csize,
sizeof (csize));
zdb_nicenum(mdstats->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(mdstats->zb_psize, psize,
sizeof (psize));
zdb_nicenum(mdstats->zb_asize, asize,
sizeof (asize));
zdb_nicenum(mdstats->zb_asize / mdstats->zb_count, avg,
sizeof (avg));
zdb_nicenum(mdstats->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)mdstats->zb_lsize / mdstats->zb_psize,
100.0 * mdstats->zb_asize / tzb->zb_asize);
(void) printf("%s\n", "Metadata Total");
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
dump_size_histograms(zcb);
}
umem_free(mdstats, sizeof (zfs_blkstat_t));
}
(void) printf("\n");
if (leaks) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
if (zcb->zcb_haderrors) {
umem_free(zcb, sizeof (zdb_cb_t));
return (3);
}
umem_free(zcb, sizeof (zdb_cb_t));
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
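/*
 * Traversal callback for dump_simulated_ddt(): tally every level-0,
 * non-metadata block in an AVL tree keyed by its dedup checksum so
 * that duplicates can be counted.
 */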
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
(void) zilog, (void) dnp;
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
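/*
 * Walk the pool as if deduplication were enabled and print the
 * histogram and dedup ratio that the resulting DDT would have had.
 */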
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
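/*
 * Cross-check the device_removal and obsolete_counts feature
 * refcounts against the number of indirect vdevs and their associated
 * MOS objects. Returns non-zero on a mismatch.
 */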
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter; if none is given, it attempts to infer
* the config from the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
*/
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1) {
if (target != poolname)
free(poolname);
return (NULL);
}
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
free(bogus_name);
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
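/*
 * Verify that a checkpoint space map entry lies within its metaslab
 * and is absent from the checkpointed state's ms_allocatable tree.
 */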
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool, therefore
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in the checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in current's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can do verification on it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
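/*
 * Dump the checkpoint space map of every top-level vdev that still
 * has one, e.g. while a checkpoint discard is in progress.
 */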
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
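/*
 * If the pool_checkpoint feature is active, look up the checkpointed
 * uberblock in the MOS, print it, and verify the checkpointed state
 * against the current state of the pool.
 */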
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
(void) arg;
for (uint64_t i = start; i < size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
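/*
 * Recursively mark all MOS objects referenced by a vdev (DTLs,
 * metaslab arrays, space maps, indirect mappings, ZAPs) so that they
 * are not reported as leaked.
 */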
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_root_zap != 0)
mos_obj_refd(vd->vdev_root_zap);
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
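/*
 * Mark the log spacemap ZAP and every log space map object as
 * referenced.
 */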
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
static void
errorlog_count_refd(objset_t *mos, uint64_t errlog)
{
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, errlog);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
mos_obj_refd(za.za_first_integer);
}
zap_cursor_fini(&zc);
}
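/*
 * Mark every MOS object the pool is known to reference, then walk all
 * allocated MOS objects and report any that were never marked
 * (leaked) as well as any that were marked but never allocated.
 * Returns 2 if a discrepancy was found.
 */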
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
errorlog_count_refd(mos, spa->spa_errlog_last);
errorlog_count_refd(mos, spa->spa_errlog_scrub);
}
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t class = 0; class < DDT_CLASSES; class++) {
for (uint64_t type = 0; type < DDT_TYPES; type++) {
for (uint64_t cksum = 0;
cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
ddt_t *ddt = spa->spa_ddt[cksum];
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
}
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
VERIFY0(dmu_object_info(mos, object, &doi));
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
return (rv);
}
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
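/*
 * Space map log callback: count, per txg and in total, how many log
 * entries are still valid (not yet obsoleted by a metaslab flush),
 * printing per-log statistics each time the txg changes.
 */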
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed logs - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos = {0};
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
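/*
 * Top-level dump dispatcher: run the verification and dump passes
 * selected by the command-line options, exiting non-zero if any pass
 * reports an error.
 */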
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa, dump_opt['M'] > 1);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1.
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
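/*
 * Parse a "[lsize/]psize" string of hex sizes for zdb_read_block().
 * Returns B_TRUE when the sizes are valid, i.e. lsize >= psize > 0.
 */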
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
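/*
 * Try to decompress a block of unknown provenance by attempting every
 * plausible compression function at every plausible logical size.
 * Returns B_TRUE if all candidate sizes were exhausted without a
 * successful decompression.
 */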
static boolean_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
(void) buf;
boolean_t exceeded = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
(getenv("ZDB_NO_ZLE") ? ZIO_COMPRESS_MASK(ZLE) : 0);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. The user can specify the 'v'
* flag to see the progress.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[*cfuncp].ci_name);
}
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if
* decompression filled exactly to lsize.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(*cfuncp, pabd,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
memcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > maxlsize) {
exceeded = B_TRUE;
}
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (exceeded);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
*/
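/*
 * For example (pool name and layout hypothetical):
 *
 *   tank:0.0:400000:20000:d - read 0x20000 bytes at offset 0x400000
 *                             from vdev 0.0 and try to decompress them
 *   tank:c1t0d0:0:4000:r - dump 0x4000 raw bytes to stdout
 */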
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *flagstr, *sizes, *tmp = NULL;
const char *vdev, *errmsg = NULL;
int i, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ?: "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ?: "");
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
errmsg = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
errmsg = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
errmsg = "offset must be a multiple of sector size";
if (errmsg) {
(void) printf("Invalid block specifier: %s - %s\n",
thing, errmsg);
goto done;
}
tmp = NULL;
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
for (i = 0; i < strlen(flagstr); i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
goto done;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
boolean_t failed = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (failed) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect invalid block pointer. If invalid, try
* decompressing.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_ONLY) == B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
boolean_t failed = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (failed || zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
czio->io_bp = bp;
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_root(spa, NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf(
"%12s\t"
"cksum=%016llx:%016llx:%016llx:%016llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp = {{{{0}}}};
unsigned long long *words = (void *)&bp;
char *buf;
int err;
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
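/*
 * The expected argument above (as passed to -E) is a blkptr_t spelled
 * out as its 16 64-bit words, in hex and colon-separated, in word
 * order; any value that does not scan as exactly 16 fields is rejected.
 */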
/* check for valid hex or decimal numeric string */
static boolean_t
zdb_numeric(char *str)
{
int i = 0;
if (strlen(str) == 0)
return (B_FALSE);
if (strncmp(str, "0x", 2) == 0 || strncmp(str, "0X", 2) == 0)
i = 2;
for (; i < strlen(str); i++) {
if (!isxdigit(str[i]))
return (B_FALSE);
}
return (B_TRUE);
}
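/*
 * For example: zdb_numeric("100") and zdb_numeric("0x1a2b") return
 * B_TRUE, while zdb_numeric("") and zdb_numeric("12g") return B_FALSE.
 * Note that a hex string is accepted even without a 0x prefix.
 */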
int
main(int argc, char **argv)
{
int c;
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
dprintf_setup(&argc, argv);
/*
* If the SPA_CONFIG_PATH environment variable is set, it overrides the
* default spa_config_path setting. If the -U flag is specified, it in
* turn overrides the environment variable.
*/
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
struct option long_options[] = {
{"ignore-assertions", no_argument, NULL, 'A'},
{"block-stats", no_argument, NULL, 'b'},
{"backup", no_argument, NULL, 'B'},
{"checksum", no_argument, NULL, 'c'},
{"config", no_argument, NULL, 'C'},
{"datasets", no_argument, NULL, 'd'},
{"dedup-stats", no_argument, NULL, 'D'},
{"exported", no_argument, NULL, 'e'},
{"embedded-block-pointer", no_argument, NULL, 'E'},
{"automatic-rewind", no_argument, NULL, 'F'},
{"dump-debug-msg", no_argument, NULL, 'G'},
{"history", no_argument, NULL, 'h'},
{"intent-logs", no_argument, NULL, 'i'},
{"inflight", required_argument, NULL, 'I'},
{"checkpointed-state", no_argument, NULL, 'k'},
{"key", required_argument, NULL, 'K'},
{"label", no_argument, NULL, 'l'},
{"disable-leak-tracking", no_argument, NULL, 'L'},
{"metaslabs", no_argument, NULL, 'm'},
{"metaslab-groups", no_argument, NULL, 'M'},
{"numeric", no_argument, NULL, 'N'},
{"option", required_argument, NULL, 'o'},
{"object-lookups", no_argument, NULL, 'O'},
{"path", required_argument, NULL, 'p'},
{"parseable", no_argument, NULL, 'P'},
{"skip-label", no_argument, NULL, 'q'},
{"copy-object", no_argument, NULL, 'r'},
{"read-block", no_argument, NULL, 'R'},
{"io-stats", no_argument, NULL, 's'},
{"simulate-dedup", no_argument, NULL, 'S'},
{"txg", required_argument, NULL, 't'},
{"uberblock", no_argument, NULL, 'u'},
{"cachefile", required_argument, NULL, 'U'},
{"verbose", no_argument, NULL, 'v'},
{"verbatim", no_argument, NULL, 'V'},
{"dump-blocks", required_argument, NULL, 'x'},
{"extreme-rewind", no_argument, NULL, 'X'},
{"all-reconstruction", no_argument, NULL, 'Y'},
{"livelist", no_argument, NULL, 'y'},
{"zstd-headers", no_argument, NULL, 'Z'},
{0, 0, 0, 0}
};
while ((c = getopt_long(argc, argv,
"AbBcCdDeEFGhiI:kK:lLmMNo:Op:PqrRsSt:uU:vVx:XYyZ",
long_options, NULL)) != -1) {
switch (c) {
case 'b':
case 'B':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'N':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'K':
dump_opt[c]++;
key_material = strdup(optarg);
/* redact key material in process table */
while (*optarg != '\0') { *optarg++ = '*'; }
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
memcpy(tmp, searchdirs, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = 256 * 1024 * 1024;
#endif
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
/*
* ZDB should have the ability to read spacemaps.
*/
spa_mode_readable_spacemaps = B_TRUE;
kernel_init(SPA_MODE_READ);
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("ABeEFkKlLNOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_set_assert_ok((dump_opt['A'] == 1) || (dump_opt['A'] > 2));
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1], NULL));
}
if (dump_opt['r']) {
target_is_spa = B_FALSE;
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
if (error != 0)
fatal("internal error: %s", strerror(error));
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
/* -N implies -d */
if (dump_opt['N'] && dump_opt['d'] == 0)
dump_opt['d'] = dump_opt['N'];
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
/*
* See if an objset ID was supplied (-d <pool>/<objset ID>).
* To disambiguate tank/100, treat 100 as an objset ID if -N
* was given; otherwise 100 is an objset ID only if lookup of
* tank/100 as a named dataset fails.
*/
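/*
 * For example (hypothetical pool "tank"):
 *
 *   zdb -d tank/100 - try the dataset named "tank/100" first and
 *                     fall back to objset ID 100 if that lookup fails
 *   zdb -d -N tank/100 - always treat 100 as an objset ID
 */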
objset_str = strchr(target, '/');
if (objset_str && strlen(objset_str) > 1 &&
zdb_numeric(objset_str + 1)) {
char *endptr;
errno = 0;
objset_str++;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
if (dump_opt['N'])
dataset_lookup = B_TRUE;
}
/* a normal dataset name, not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
exit(1);
}
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
libpc_handle_t lpch = {
.lpc_lib_handle = NULL,
.lpc_ops = &libzpool_config_ops,
.lpc_printerr = B_TRUE
};
error = zpool_find_config(&lpch, target_pool, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* import_checkpointed_state makes the assumption that the
* target pool that we pass it is already part of the spa
* namespace. Because of that we need to make sure to always
* call it after the -e option has been processed, which
* imports the pool into the namespace if it's not in the
* cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
} else if (target_is_spa || dump_opt['R'] || dump_opt['B'] ||
objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
return (error);
} else {
target_pool = strdup(target);
if (strpbrk(target, "/@") != NULL)
*strpbrk(target_pool, "/@") = '\0';
zdb_set_skip_mmp(target);
/*
* If -N was supplied, the user has indicated that
* zdb -d <pool>/<objsetID> is in effect. Otherwise
* we first assume that the dataset string is the
* dataset name. If dmu_objset_hold fails with the
* dataset string, and we have an objset_id, retry the
* lookup with the objsetID.
*/
boolean_t retry = B_TRUE;
retry_lookup:
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target_pool, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0) {
if (objset_id > 0 && retry) {
int err = dmu_objset_hold(target, FTAG,
&os);
if (err) {
dataset_lookup = B_TRUE;
retry = B_FALSE;
goto retry_lookup;
} else {
dmu_objset_rele(os, FTAG);
}
}
error = open_objset(target, FTAG, &os);
}
if (error == 0)
spa = dmu_objset_spa(os);
free(target_pool);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
const char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ?: "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
if (dump_opt['B']) {
dump_backup(target, objset_id,
argc > 0 ? argv[0] : NULL);
} else if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
if (os != NULL) {
close_objset(os, FTAG);
} else {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
index b07a02712295..a8d084bb4bd3 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
@@ -1,1309 +1,1307 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016, 2017, Intel Corporation.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
* ZFS syseventd module.
*
* file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
*
* The purpose of this module is to identify when devices are added to the
* system, and appropriately online or replace the affected vdevs.
*
* When a device is added to the system:
*
* 1. Search for any vdevs whose devid matches that of the newly added
* device.
*
* 2. If no vdevs are found, then search for any vdevs whose udev path
* matches that of the new device.
*
* 3. If no vdevs match by either method, then ignore the event.
*
* 4. Attempt to online the device with a flag to indicate that it should
* be unspared when resilvering completes. If this succeeds, then the
* same device was inserted and we should continue normally.
*
* 5. If the pool does not have the 'autoreplace' property set, attempt to
* online the device again without the unspare flag, which will
* generate a FMA fault.
*
* 6. If the pool has the 'autoreplace' property set, and the matching vdev
* is a whole disk, then label the new disk and attempt a 'zpool
* replace'.
*
* The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
* event indicates that a device failed to open during pool load, but the
* autoreplace property was set. In this case, we deferred the associated
* FMA fault until our module had a chance to process the autoreplace logic.
* If the device could not be replaced, then the second online attempt will
* trigger the FMA fault that we skipped earlier.
*
* On Linux, udev provides a disk insert event for both the disk and the partition.
*/
#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"
#define DEV_BYID_PATH "/dev/disk/by-id/"
#define DEV_BYPATH_PATH "/dev/disk/by-path/"
#define DEV_BYVDEV_PATH "/dev/disk/by-vdev/"
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
libzfs_handle_t *g_zfshdl;
list_t g_pool_list; /* list of unavailable pools at initialization */
list_t g_device_list; /* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid; /* zfs_enum_pools() thread */
typedef struct unavailpool {
zpool_handle_t *uap_zhp;
list_node_t uap_node;
} unavailpool_t;
typedef struct pendingdev {
char pd_physpath[128];
list_node_t pd_node;
} pendingdev_t;
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
nvlist_t *nvroot;
vdev_stat_t *vs;
unsigned int c;
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
return (vs->vs_state);
}
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
unavailpool_t *uap;
uap = malloc(sizeof (unavailpool_t));
if (uap == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
uap->uap_zhp = zhp;
list_insert_tail((list_t *)data, uap);
} else {
zpool_close(zhp);
}
return (0);
}
/*
* Two-stage replace on Linux: since we get disk notifications,
* we can wait for the partitioned disk slice to show up.
*
* First stage tags the disk, initiates async partitioning, and returns
* Second stage finds the tag and proceeds to ZFS labeling/replace
*
* disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
*
* 1. physical match with no fs, no partition
* tag it top, partition disk
*
* 2. physical match again, see partition and tag
*
*/
/*
* The device associated with the given vdev (either by devid or physical path)
* has been added to the system. If 'isdisk' is set, then we only attempt a
* replacement if it's a whole disk. This also implies that we should label the
* disk first.
*
* First, we attempt to online the device (making sure to undo any spare
* operation when finished). If this succeeds, then we're done. If it fails,
* and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
* but that the label was not what we expected. If the 'autoreplace' property
* is enabled, then we relabel the disk (if specified), and attempt a 'zpool
* replace'. If the online is successful, but the new state is something else
* (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
* race, and we should avoid attempting to relabel the disk.
*
* We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
*/
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
const char *path;
vdev_state_t newstate;
nvlist_t *nvroot, *newvd;
pendingdev_t *device;
uint64_t wholedisk = 0ULL;
uint64_t offline = 0ULL, faulted = 0ULL;
uint64_t guid = 0ULL;
uint64_t is_spare = 0;
const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
char rawpath[PATH_MAX], fullpath[PATH_MAX];
char devpath[PATH_MAX];
int ret;
int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
boolean_t is_sd = B_FALSE;
boolean_t is_mpath_wholedisk = B_FALSE;
uint_t c;
vdev_stat_t *vs;
if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
return;
/* Skip healthy disks */
verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (vs->vs_state == VDEV_STATE_HEALTHY) {
zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
__func__, path);
return;
}
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&enc_sysfs_path);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);
/*
* Special case:
*
* We've seen cases where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
* entry in its config. For example, on this force-faulted disk:
*
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
* the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
*/
if (physpath == NULL && path != NULL) {
/* If path begins with "/dev/disk/by-vdev/" ... */
if (strncmp(path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) == 0) {
/* Set physpath to the char after "/dev/disk/by-vdev" */
physpath = &path[strlen(DEV_BYVDEV_PATH)];
}
}
/*
* We don't want to autoreplace offlined disks. However, we do want to
* replace force-faulted disks (`zpool offline -f`). Force-faulted
* disks have both offline=1 and faulted=1 in the nvlist.
*/
if (offline && !faulted) {
zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
__func__, path);
return;
}
is_mpath_wholedisk = is_mpath_whole_disk(path);
zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
" %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
"(guid %llu)",
zpool_get_name(zhp), path,
physpath ? physpath : "NULL",
wholedisk ? "is" : "not",
is_mpath_wholedisk ? "is" : "not",
labeled ? "is" : "not",
enc_sysfs_path,
(long long unsigned int)guid);
/*
* The vdev GUID is preferred for identification (it is passed in place of the path)
*/
if (guid != 0) {
(void) snprintf(fullpath, sizeof (fullpath), "%llu",
(long long unsigned int)guid);
} else {
/*
* otherwise use path sans partition suffix for whole disks
*/
(void) strlcpy(fullpath, path, sizeof (fullpath));
if (wholedisk) {
char *spath = zfs_strip_partition(fullpath);
if (!spath) {
zed_log_msg(LOG_INFO, "%s: Can't alloc",
__func__);
return;
}
(void) strlcpy(fullpath, spath, sizeof (fullpath));
free(spath);
}
}
if (is_spare)
online_flag |= ZFS_ONLINE_SPARE;
/*
* Attempt to online the device.
*/
if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
(newstate == VDEV_STATE_HEALTHY ||
newstate == VDEV_STATE_DEGRADED)) {
zed_log_msg(LOG_INFO,
" zpool_vdev_online: vdev '%s' ('%s') is "
"%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
"HEALTHY" : "DEGRADED");
return;
}
/*
* vdev_id alias rule for using scsi_debug devices (FMA automated
* testing)
*/
if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
is_sd = B_TRUE;
/*
* If the pool doesn't have the autoreplace property set, then use
* vdev online to trigger a FMA fault by posting an ereport.
*/
if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
!(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
"not a blank disk for '%s' ('%s')", fullpath,
physpath);
return;
}
/*
* Convert physical path into its current device node. Rawpath
* needs to be /dev/disk/by-vdev for a scsi_debug device since
* /dev/disk/by-path will not be present.
*/
(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
if (realpath(rawpath, devpath) == NULL && !is_mpath_wholedisk) {
zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
rawpath, strerror(errno));
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
fullpath, libzfs_error_description(g_zfshdl));
return;
}
/* Only autoreplace bad disks */
if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
(vs->vs_state != VDEV_STATE_FAULTED) &&
(vs->vs_state != VDEV_STATE_CANT_OPEN)) {
zed_log_msg(LOG_INFO, " not autoreplacing since disk isn't in "
"a bad state (currently %llu)", vs->vs_state);
return;
}
nvlist_lookup_string(vdev, "new_devid", &new_devid);
if (is_mpath_wholedisk) {
/* Don't label device mapper or multipath disks. */
} else if (!labeled) {
/*
* we're auto-replacing a raw disk, so label it first
*/
char *leafname;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label. Before we can label the disk, we need
* to map the physical string that was matched on to the
* underlying device node.
*
* If any part of this process fails, then do a force online
* to trigger a ZFS fault for the device (and any hot spare
* replacement).
*/
leafname = strrchr(devpath, '/') + 1;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label.
*/
if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
"label '%s' (%s)", leafname,
libzfs_error_description(g_zfshdl));
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
/*
* The disk labeling is asynchronous on Linux. Just record
* this label request and return as there will be another
* disk add event for the partition after the labeling is
* completed.
*/
device = malloc(sizeof (pendingdev_t));
if (device == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
(void) strlcpy(device->pd_physpath, physpath,
sizeof (device->pd_physpath));
list_insert_tail(&g_device_list, device);
zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
leafname, (u_longlong_t)guid);
return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */
} else /* labeled */ {
boolean_t found = B_FALSE;
/*
* match up with request above to label the disk
*/
for (device = list_head(&g_device_list); device != NULL;
device = list_next(&g_device_list, device)) {
if (strcmp(physpath, device->pd_physpath) == 0) {
list_remove(&g_device_list, device);
free(device);
found = B_TRUE;
break;
}
zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
physpath, device->pd_physpath);
}
if (!found) {
/* unexpected partition slice encountered */
zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
fullpath);
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
physpath, (u_longlong_t)guid);
(void) snprintf(devpath, sizeof (devpath), "%s%s",
DEV_BYID_PATH, new_devid);
}
/*
* Construct the root vdev to pass to zpool_vdev_attach(). While adding
* the entire vdev structure is harmless, we construct a reduced set of
* path/physpath/wholedisk to keep it simple.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
return;
}
if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
nvlist_free(nvroot);
return;
}
if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
(physpath != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
(enc_sysfs_path != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)&newvd, 1) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
nvlist_free(newvd);
nvlist_free(nvroot);
return;
}
nvlist_free(newvd);
/*
* Wait for udev to verify the links exist, then auto-replace
* the leaf disk at the same physical location.
*/
if (zpool_label_disk_wait(path, 3000) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
"disk %s is missing", path);
nvlist_free(nvroot);
return;
}
/*
* Prefer sequential resilvering when supported (mirrors and dRAID),
* otherwise fall back to a traditional healing resilver.
*/
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
if (ret != 0) {
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
B_TRUE, B_FALSE);
}
zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
fullpath, path, (ret == 0) ? "no errors" :
libzfs_error_description(g_zfshdl));
nvlist_free(nvroot);
}
/*
* Utility functions to find a vdev matching given criteria.
*/
typedef struct dev_data {
const char *dd_compare;
const char *dd_prop;
zfs_process_func_t dd_func;
boolean_t dd_found;
boolean_t dd_islabeled;
uint64_t dd_pool_guid;
uint64_t dd_vdev_guid;
uint64_t dd_new_vdev_guid;
const char *dd_new_devid;
uint64_t dd_num_spares;
} dev_data_t;
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
dev_data_t *dp = data;
const char *path = NULL;
uint_t c, children;
nvlist_t **child;
uint64_t guid = 0;
uint64_t isspare = 0;
/*
* First iterate over any children.
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/*
* Iterate over any spares and cache devices
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/* once a vdev has been matched and processed there is nothing left to do */
if (dp->dd_found && dp->dd_num_spares == 0)
return;
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
/*
* Match by GUID if available otherwise fallback to devid or physical
*/
if (dp->dd_vdev_guid != 0) {
if (guid != dp->dd_vdev_guid)
return;
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
dp->dd_found = B_TRUE;
} else if (dp->dd_compare != NULL) {
/*
* NOTE: On Linux there is an event for partition, so unlike
* illumos, substring matching is not required to accommodate
* the partition suffix. An exact match will be present in
* the dp->dd_compare value.
* If the attached disk already contains a vdev GUID, it means
* the disk is not clean. In such a scenario, the physical path
* alone would match, but onlining the disk would fault it. So
* we only want to proceed if either the GUID matches the last
* attached disk or the disk is in a clean state.
*/
if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
strcmp(dp->dd_compare, path) != 0) {
- zed_log_msg(LOG_INFO, " %s: no match (%s != vdev %s)",
- __func__, dp->dd_compare, path);
return;
}
if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
zed_log_msg(LOG_INFO, " %s: no match (GUID:%llu"
" != vdev GUID:%llu)", __func__,
dp->dd_new_vdev_guid, guid);
return;
}
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
dp->dd_prop, path);
dp->dd_found = B_TRUE;
/* pass the new devid for use by replacing code */
if (dp->dd_new_devid != NULL) {
(void) nvlist_add_string(nvl, "new_devid",
dp->dd_new_devid);
}
}
if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
dp->dd_num_spares++;
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
static void
zfs_enable_ds(void *arg)
{
unavailpool_t *pool = (unavailpool_t *)arg;
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
zpool_close(pool->uap_zhp);
free(pool);
}
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
nvlist_t *config, *nvl;
dev_data_t *dp = data;
uint64_t pool_guid;
unavailpool_t *pool;
zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
/*
* For each vdev in this pool, look for a match to apply dd_func
*/
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
if (dp->dd_pool_guid == 0 ||
(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
(void) nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvl);
zfs_iter_vdev(zhp, nvl, data);
}
} else {
zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
}
/*
* if this pool was originally unavailable,
* then enable its datasets asynchronously
*/
if (g_enumeration_done) {
for (pool = list_head(&g_pool_list); pool != NULL;
pool = list_next(&g_pool_list, pool)) {
if (strcmp(zpool_get_name(zhp),
zpool_get_name(pool->uap_zhp)))
continue;
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
list_remove(&g_pool_list, pool);
(void) tpool_dispatch(g_tpool, zfs_enable_ds,
pool);
break;
}
}
}
zpool_close(zhp);
/* cease iteration after a match */
return (dp->dd_found && dp->dd_num_spares == 0);
}
/*
* Given a physical device location, iterate over all
* (pool, vdev) pairs which correspond to that location.
*/
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
boolean_t is_slice, uint64_t new_vdev_guid)
{
dev_data_t data = { 0 };
data.dd_compare = physical;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid; /* used by auto replace code */
data.dd_new_vdev_guid = new_vdev_guid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
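/*
 * A hypothetical invocation, as issued while handling a disk add event
 * (the physpath and devid strings are the Linux schema examples shown
 * further below):
 *
 *   (void) devphys_iter("pci-0000:04:00.0-sas-0x4433221106000000-lun-0",
 *       "ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1", zfs_process_add,
 *       B_FALSE, 0);
 */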
/*
* Given a device identifier, find any vdevs with a matching by-vdev
* path. Normally we shouldn't need this as the comparison would be
* made earlier in devphys_iter(). For example, if we were replacing
* /dev/disk/by-vdev/L28, normally devphys_iter() would match the
* ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
* of the new disk config. However, we've seen cases where
* ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk. Here's
* an example of a real 2-disk mirror pool where one disk was force
* faulted:
*
* com.delphix:vdev_zap_top: 129
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* So in the case above, the only thing we could compare is the path.
*
* We can do this because we assume by-vdev paths are authoritative as physical
* paths. We could not assume this for normal paths like /dev/sda since the
* physical location /dev/sda points to could change over time.
*/
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = by_vdev_path;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) != 0) {
/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
return (B_FALSE);
}
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching devid.
* On Linux we can match devid directly which is always a whole disk.
*/
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = devid;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_DEVID;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device guid, find any vdevs with a matching guid.
*/
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_func = func;
data.dd_found = B_FALSE;
data.dd_pool_guid = pool_guid;
data.dd_vdev_guid = vdev_guid;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Handle a EC_DEV_ADD.ESC_DISK event.
*
* illumos
* Expects: DEV_PHYS_PATH string in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/dsk/c0t1d0s0' (persistent)
* devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
* phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
*
* linux
* provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/sdc1' (not persistent)
* devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
* phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
*/
static int
zfs_deliver_add(nvlist_t *nvl)
{
const char *devpath = NULL, *devid = NULL;
uint64_t pool_guid = 0, vdev_guid = 0;
boolean_t is_slice;
/*
* Expecting a devid string and an optional physical location and guid
*/
if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
return (-1);
}
(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
devid, devpath ? devpath : "NULL", is_slice);
/*
* Iterate over all vdevs looking for a match in the following order:
* 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
* 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
* 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
* 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
* by-vdev paths represent physical paths).
*/
if (devid_iter(devid, zfs_process_add, is_slice))
return (0);
if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
is_slice, vdev_guid))
return (0);
if (vdev_guid != 0)
(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
is_slice);
if (devpath != NULL) {
/* Can we match a /dev/disk/by-vdev/ path? */
char by_vdev_path[MAXPATHLEN];
snprintf(by_vdev_path, sizeof (by_vdev_path),
"/dev/disk/by-vdev/%s", devpath);
if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
is_slice))
return (0);
}
return (0);
}
/*
* Called when we receive a VDEV_CHECK event, which indicates a device could not
* be opened during initial pool open, but the autoreplace property was set on
* the pool. In this case, we treat it as if it were an add event.
*/
static int
zfs_deliver_check(nvlist_t *nvl)
{
dev_data_t data = { 0 };
if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
&data.dd_pool_guid) != 0 ||
nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
&data.dd_vdev_guid) != 0 ||
data.dd_vdev_guid == 0)
return (0);
zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
data.dd_pool_guid, data.dd_vdev_guid);
data.dd_func = zfs_process_add;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (0);
}
/*
* Given a path to a vdev, look up the vdev's physical size from its
* config nvlist.
*
* Returns the vdev's physical size in bytes on success, 0 on error.
*/
static uint64_t
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
nvlist_t *nvl = NULL;
boolean_t avail_spare, l2cache, log;
vdev_stat_t *vs = NULL;
uint_t c;
nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
if (!nvl)
return (0);
verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (!vs) {
zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
vdev_path);
return (0);
}
return (vs->vs_pspace);
}
/*
* Given a path to a vdev, look up whether the vdev is a "whole disk" in the
* config nvlist. "whole disk" means that ZFS was passed a whole disk
* at pool creation time, which it partitioned up and has full control over.
* Thus a partition with wholedisk=1 set tells us that zfs created the
* partition at creation time. A partition without wholedisk set would have
* been created externally (like with fdisk) and passed to ZFS.
*
* Returns the whole disk value (either 0 or 1).
*/
static uint64_t
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
nvlist_t *nvl = NULL;
boolean_t avail_spare, l2cache, log;
uint64_t wholedisk = 0;
nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
if (!nvl)
return (0);
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
return (wholedisk);
}
/*
* If the device size grew more than 1% then return true.
*/
#define DEVICE_GREW(oldsize, newsize) \
((newsize > oldsize) && \
((newsize / (newsize - oldsize)) <= 100))
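/*
 * For example, growing from 100 GiB to 102 GiB gives
 * newsize / (newsize - oldsize) = 51 <= 100, so DEVICE_GREW() is true;
 * growing from 1000 MiB to 1005 MiB gives 201 > 100, so a 0.5% bump is
 * ignored.
 */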
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
boolean_t avail_spare, l2cache;
nvlist_t *udev_nvl = data;
nvlist_t *tgt;
int error;
const char *tmp_devname;
char devname[MAXPATHLEN] = "";
uint64_t guid;
if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(devname, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
&tmp_devname) == 0) {
strlcpy(devname, tmp_devname, MAXPATHLEN);
zfs_append_partition(devname, MAXPATHLEN);
} else {
zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
}
zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
devname, zpool_get_name(zhp));
if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
&avail_spare, &l2cache, NULL)) != NULL) {
const char *path;
char fullpath[MAXPATHLEN];
uint64_t wholedisk = 0;
error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
if (error) {
zpool_close(zhp);
return (0);
}
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (wholedisk) {
char *tmp;
path = strrchr(path, '/');
if (path != NULL) {
tmp = zfs_strip_partition(path + 1);
if (tmp == NULL) {
zpool_close(zhp);
return (0);
}
} else {
zpool_close(zhp);
return (0);
}
(void) strlcpy(fullpath, tmp, sizeof (fullpath));
free(tmp);
/*
* We need to reopen the pool associated with this
* device so that the kernel can update the size of
* the expanded device. When expanding there is no
* need to restart the scrub from the beginning.
*/
boolean_t scrub_restart = B_FALSE;
(void) zpool_reopen_one(zhp, &scrub_restart);
} else {
(void) strlcpy(fullpath, path, sizeof (fullpath));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
vdev_state_t newstate;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
/*
* If this disk size has not changed, then
* there's no need to do an autoexpand. To
* check we look at the disk's size in its
* config, and compare it to the disk size
* that udev is reporting.
*/
uint64_t udev_size = 0, conf_size = 0,
wholedisk = 0, udev_parent_size = 0;
/*
* Get the size of our disk that udev is
* reporting.
*/
if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
&udev_size) != 0) {
udev_size = 0;
}
/*
* Get the size of our disk's parent device
* from udev (where sda1's parent is sda).
*/
if (nvlist_lookup_uint64(udev_nvl,
DEV_PARENT_SIZE, &udev_parent_size) != 0) {
udev_parent_size = 0;
}
conf_size = vdev_size_from_config(zhp,
fullpath);
wholedisk = vdev_whole_disk_from_config(zhp,
fullpath);
/*
* Only attempt an autoexpand if the vdev size
* changed. There are two different cases
* to consider.
*
* 1. wholedisk=1
* If you do a 'zpool create' on a whole disk
* (like /dev/sda), then zfs will create
* partitions on the disk (like /dev/sda1). In
* that case, wholedisk=1 will be set in the
* partition's nvlist config. So zed will need
* to see if your parent device (/dev/sda)
* expanded in size, and if so, then attempt
* the autoexpand.
*
* 2. wholedisk=0
* If you do a 'zpool create' on an existing
* partition, or a device that doesn't allow
* partitions, then wholedisk=0, and you will
* simply need to check if the device itself
* expanded in size.
*/
if (DEVICE_GREW(conf_size, udev_size) ||
(wholedisk && DEVICE_GREW(conf_size,
udev_parent_size))) {
error = zpool_vdev_online(zhp, fullpath,
0, &newstate);
zed_log_msg(LOG_INFO,
"%s: autoexpanding '%s' from %llu"
" to %llu bytes in pool '%s': %d",
__func__, fullpath, conf_size,
MAX(udev_size, udev_parent_size),
zpool_get_name(zhp), error);
}
}
}
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* This function handles the ESC_DEV_DLE device change event. Use the
* provided vdev guid when looking up a disk or partition; when the guid
* is not present, assume the entire disk is owned by ZFS, append the
* expected -part1 partition information, and then look up by physical path.
*/
static int
zfs_deliver_dle(nvlist_t *nvl)
{
const char *devname;
char name[MAXPATHLEN];
uint64_t guid;
if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(name, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
strlcpy(name, devname, MAXPATHLEN);
zfs_append_partition(name, MAXPATHLEN);
} else {
sprintf(name, "unknown");
zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
}
if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
"found", name);
return (1);
}
return (0);
}
/*
* syseventd daemon module event handler
*
* Handles syseventd daemon zfs device related events:
*
* EC_DEV_ADD.ESC_DISK
* EC_DEV_STATUS.ESC_DEV_DLE
* EC_ZFS.ESC_ZFS_VDEV_CHECK
*
* Note: assumes only one thread active at a time (not thread safe)
*/
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
int ret;
boolean_t is_check = B_FALSE, is_dle = B_FALSE;
if (strcmp(class, EC_DEV_ADD) == 0) {
/*
* We're mainly interested in disk additions, but we also listen
* for new loop devices, to allow for simplified testing.
*/
if (strcmp(subclass, ESC_DISK) != 0 &&
strcmp(subclass, ESC_LOFI) != 0)
return (0);
is_check = B_FALSE;
} else if (strcmp(class, EC_ZFS) == 0 &&
strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
/*
* This event signifies that a device failed to open
* during pool load, but the 'autoreplace' property was
* set, so we should pretend it's just been added.
*/
is_check = B_TRUE;
} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
strcmp(subclass, ESC_DEV_DLE) == 0) {
is_dle = B_TRUE;
} else {
return (0);
}
if (is_dle)
ret = zfs_deliver_dle(nvl);
else if (is_check)
ret = zfs_deliver_check(nvl);
else
ret = zfs_deliver_add(nvl);
return (ret);
}
static void *
zfs_enum_pools(void *arg)
{
(void) arg;
(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
/*
* Linux - instead of using a thread pool, each list entry
* will spawn a thread when an unavailable pool transitions
* to available. zfs_slm_fini will wait for these threads.
*/
g_enumeration_done = B_TRUE;
return (NULL);
}
/*
* Called from the zed daemon at startup.
*
* Messages are sent from zevents or the udev monitor.
*
* For now, each agent has its own libzfs instance.
*/
int
zfs_slm_init(void)
{
if ((g_zfshdl = libzfs_init()) == NULL)
return (-1);
/*
* collect a list of unavailable pools (asynchronously,
* since this can take a while)
*/
list_create(&g_pool_list, sizeof (struct unavailpool),
offsetof(struct unavailpool, uap_node));
if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
list_destroy(&g_pool_list);
libzfs_fini(g_zfshdl);
return (-1);
}
pthread_setname_np(g_zfs_tid, "enum-pools");
list_create(&g_device_list, sizeof (struct pendingdev),
offsetof(struct pendingdev, pd_node));
return (0);
}
void
zfs_slm_fini(void)
{
unavailpool_t *pool;
pendingdev_t *device;
/* wait for zfs_enum_pools thread to complete */
(void) pthread_join(g_zfs_tid, NULL);
/* destroy the thread pool */
if (g_tpool != NULL) {
tpool_wait(g_tpool);
tpool_destroy(g_tpool);
}
while ((pool = list_remove_head(&g_pool_list)) != NULL) {
zpool_close(pool->uap_zhp);
free(pool);
}
list_destroy(&g_pool_list);
while ((device = list_remove_head(&g_device_list)) != NULL)
free(device);
list_destroy(&g_device_list);
libzfs_fini(g_zfshdl);
}
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
(void) zfs_slm_deliver_event(class, subclass, nvl);
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
index f83ae09259ab..a0e377a4a0c8 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
@@ -1,663 +1,668 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
*/
/*
* The ZFS retire agent is responsible for managing hot spares across all pools.
* When we see a device fault or a device removal, we try to open the associated
* pool and look for any hot spares. We iterate over any available hot spares
* and attempt a 'zpool replace' for each one.
*
* For vdevs diagnosed as faulty, the agent is also responsible for proactively
* marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
*/
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzutil.h>
#include <libzfs.h>
#include <string.h>
#include <libgen.h>
#include "zfs_agents.h"
#include "fmd_api.h"
typedef struct zfs_retire_repaired {
struct zfs_retire_repaired *zrr_next;
uint64_t zrr_pool;
uint64_t zrr_vdev;
} zfs_retire_repaired_t;
typedef struct zfs_retire_data {
libzfs_handle_t *zrd_hdl;
zfs_retire_repaired_t *zrd_repaired;
} zfs_retire_data_t;
static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
zfs_retire_repaired_t *zrp;
while ((zrp = zdp->zrd_repaired) != NULL) {
zdp->zrd_repaired = zrp->zrr_next;
fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
}
}
/*
* Find a pool with a matching GUID.
*/
typedef struct find_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
nvlist_t *cb_vdev;
uint64_t cb_vdev_guid;
uint64_t cb_num_spares;
} find_cbdata_t;
static int
find_pool(zpool_handle_t *zhp, void *data)
{
find_cbdata_t *cbp = data;
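/*
* Returning nonzero stops zpool_iter(); on a match the handle is
* intentionally left open and handed back via cb_zhp for the
* caller to close.
*/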
if (cbp->cb_guid ==
zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* Find a vdev within a tree with a matching GUID.
*/
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
nvlist_t *ret;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
guid == search_guid) {
fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
"matched vdev %llu", guid);
return (nv);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
return (NULL);
}
static int
remove_spares(zpool_handle_t *zhp, void *data)
{
nvlist_t *config, *nvroot;
nvlist_t **spares;
uint_t nspares;
char *devname;
find_cbdata_t *cbp = data;
uint64_t spareguid = 0;
vdev_stat_t *vs;
unsigned int c;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) {
zpool_close(zhp);
return (0);
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) != 0) {
zpool_close(zhp);
return (0);
}
for (int i = 0; i < nspares; i++) {
if (nvlist_lookup_uint64(spares[i], ZPOOL_CONFIG_GUID,
&spareguid) == 0 && spareguid == cbp->cb_vdev_guid) {
devname = zpool_vdev_name(NULL, zhp, spares[i],
B_FALSE);
nvlist_lookup_uint64_array(spares[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c);
if (vs->vs_state != VDEV_STATE_REMOVED &&
zpool_vdev_remove_wanted(zhp, devname) == 0)
cbp->cb_num_spares++;
break;
}
}
zpool_close(zhp);
return (0);
}
/*
* Given a vdev guid, find and remove all spares associated with it.
*/
static int
find_and_remove_spares(libzfs_handle_t *zhdl, uint64_t vdev_guid)
{
find_cbdata_t cb;
cb.cb_num_spares = 0;
cb.cb_vdev_guid = vdev_guid;
zpool_iter(zhdl, remove_spares, &cb);
return (cb.cb_num_spares);
}
/*
* Given a (pool, vdev) GUID pair, find the matching pool and vdev.
*/
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
nvlist_t **vdevp)
{
find_cbdata_t cb;
zpool_handle_t *zhp;
nvlist_t *config, *nvroot;
/*
* Find the corresponding pool and make sure the vdev still exists.
*/
cb.cb_guid = pool_guid;
if (zpool_iter(zhdl, find_pool, &cb) != 1)
return (NULL);
zhp = cb.cb_zhp;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) != 0) {
zpool_close(zhp);
return (NULL);
}
if (vdev_guid != 0) {
if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
zpool_close(zhp);
return (NULL);
}
}
return (zhp);
}
/*
* Given a vdev, attempt to replace it with every known spare until one
* succeeds or we run out of devices to try.
* Return whether we were successful or not in replacing the device.
*/
static boolean_t
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
nvlist_t *config, *nvroot, *replacement;
nvlist_t **spares;
uint_t s, nspares;
char *dev_name;
zprop_source_t source;
int ashift;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) != 0)
return (B_FALSE);
/*
* Find out if there are any hot spares available in the pool.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) != 0)
return (B_FALSE);
/*
* lookup "ashift" pool property, we may need it for the replacement
*/
ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT);
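/*
* zpool_vdev_attach() takes a root-type nvlist whose single child
* describes the replacement device; each candidate spare below is
* swapped in as that child.
*/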
dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
/*
* Try to replace each spare, ending when we successfully
* replace it.
*/
for (s = 0; s < nspares; s++) {
boolean_t rebuild = B_FALSE;
const char *spare_name, *type;
if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
&spare_name) != 0)
continue;
/* prefer sequential resilvering for distributed spares */
if ((nvlist_lookup_string(spares[s], ZPOOL_CONFIG_TYPE,
&type) == 0) && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
rebuild = B_TRUE;
/* if set, add the "ashift" pool property to the spare nvlist */
if (source != ZPROP_SRC_DEFAULT)
(void) nvlist_add_uint64(spares[s],
ZPOOL_CONFIG_ASHIFT, ashift);
(void) nvlist_add_nvlist_array(replacement,
ZPOOL_CONFIG_CHILDREN, (const nvlist_t **)&spares[s], 1);
fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
dev_name, zfs_basename(spare_name));
if (zpool_vdev_attach(zhp, dev_name, spare_name,
replacement, B_TRUE, rebuild) == 0) {
free(dev_name);
nvlist_free(replacement);
return (B_TRUE);
}
}
free(dev_name);
nvlist_free(replacement);
return (B_FALSE);
}
/*
* Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
* the ASRU is now usable. ZFS has found the device to be present and
* functioning.
*/
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
zfs_retire_repaired_t *zrp;
uint64_t pool_guid, vdev_guid;
if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
&pool_guid) != 0 || nvlist_lookup_uint64(nvl,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
return;
/*
* Before checking the state of the ASRU, go through and see if we've
* already made an attempt to repair this ASRU. This list is cleared
* whenever we receive any kind of list event, and is designed to
* prevent us from generating a feedback loop when we attempt repairs
* against a faulted pool. The problem is that checking the unusable
* state of the ASRU can involve opening the pool, which can post
* statechange events but otherwise leave the pool in the faulted
* state. This list allows us to detect when a statechange event is
* due to our own request.
*/
for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
if (zrp->zrr_pool == pool_guid &&
zrp->zrr_vdev == vdev_guid)
return;
}
zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
zrp->zrr_next = zdp->zrd_repaired;
zrp->zrr_pool = pool_guid;
zrp->zrr_vdev = vdev_guid;
zdp->zrd_repaired = zrp;
fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
vdev_guid, pool_guid);
}
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
const char *class)
{
(void) ep;
uint64_t pool_guid, vdev_guid;
zpool_handle_t *zhp;
nvlist_t *resource, *fault;
nvlist_t **faults;
uint_t f, nfaults;
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
libzfs_handle_t *zhdl = zdp->zrd_hdl;
boolean_t fault_device, degrade_device;
boolean_t is_repair;
boolean_t l2arc = B_FALSE;
boolean_t spare = B_FALSE;
const char *scheme;
nvlist_t *vdev = NULL;
const char *uuid;
int repair_done = 0;
boolean_t retire;
boolean_t is_disk;
vdev_aux_t aux;
uint64_t state = 0;
vdev_stat_t *vs;
unsigned int c;
fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);
(void) nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE,
&state);
/*
* If this is a resource notifying us of device removal, then simply
* check for an available spare and continue, unless the device is an
* l2arc vdev, in which case we just offline it.
*/
if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
(strcmp(class, "resource.fs.zfs.statechange") == 0 &&
(state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
const char *devtype;
char *devname;
if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
&devtype) == 0) {
if (strcmp(devtype, VDEV_TYPE_SPARE) == 0)
spare = B_TRUE;
else if (strcmp(devtype, VDEV_TYPE_L2CACHE) == 0)
l2arc = B_TRUE;
}
if (nvlist_lookup_uint64(nvl,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
return;
+ if (vdev_guid == 0) {
+ fmd_hdl_debug(hdl, "Got a zero GUID");
+ return;
+ }
+
if (spare) {
int nspares = find_and_remove_spares(zhdl, vdev_guid);
fmd_hdl_debug(hdl, "%d spares removed", nspares);
return;
}
if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
&pool_guid) != 0)
return;
if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
&vdev)) == NULL)
return;
devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c);
/*
* If the removed state is requested for an already-removed
* vdev, it's a loopback event from spa_async_remove(); just
* ignore it.
*/
if (vs->vs_state == VDEV_STATE_REMOVED &&
state == VDEV_STATE_REMOVED)
return;
/* Remove the vdev since the device is unplugged */
int remove_status = 0;
if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
remove_status = zpool_vdev_remove_wanted(zhp, devname);
fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
", err:%d", devname, libzfs_errno(zhdl));
}
/* Replace the vdev with a spare if it's not an l2arc */
if (!l2arc && !remove_status &&
(!fmd_prop_get_int32(hdl, "spare_on_remove") ||
replace_with_spare(hdl, zhp, vdev) == B_FALSE)) {
/* Could not handle with spare */
fmd_hdl_debug(hdl, "no spare for '%s'", devname);
}
free(devname);
zpool_close(zhp);
return;
}
if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
return;
/*
* Note: on Linux, statechange events cover more than just
* healthy ones, so we need to confirm the actual state value.
*/
if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
state == VDEV_STATE_HEALTHY) {
zfs_vdev_repair(hdl, nvl);
return;
}
if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
zfs_vdev_repair(hdl, nvl);
return;
}
zfs_retire_clear_data(hdl, zdp);
if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
is_repair = B_TRUE;
else
is_repair = B_FALSE;
/*
* We subscribe to zfs faults as well as all repair events.
*/
if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
&faults, &nfaults) != 0)
return;
for (f = 0; f < nfaults; f++) {
fault = faults[f];
fault_device = B_FALSE;
degrade_device = B_FALSE;
is_disk = B_FALSE;
if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
&retire) == 0 && retire == 0)
continue;
/*
* While we subscribe to fault.fs.zfs.*, we only take action
* for faults targeting a specific vdev (open failure or SERD
* failure). We also subscribe to fault.io.* events, so that
* faulty disks will be faulted in the ZFS configuration.
*/
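/*
* Class-to-action summary (derived from the checks below):
* fault.fs.zfs.vdev.io -> mark the vdev FAULTED
* fault.fs.zfs.vdev.checksum -> mark the vdev DEGRADED
* fault.fs.zfs.device -> no state change; spare handling only
* fault.io.* -> flagged is_disk and skipped below
*/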
if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
fault_device = B_TRUE;
} else if (fmd_nvl_class_match(hdl, fault,
"fault.fs.zfs.vdev.checksum")) {
degrade_device = B_TRUE;
} else if (fmd_nvl_class_match(hdl, fault,
"fault.fs.zfs.device")) {
fault_device = B_FALSE;
} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
is_disk = B_TRUE;
fault_device = B_TRUE;
} else {
continue;
}
if (is_disk) {
continue;
} else {
/*
* This is a ZFS fault. Lookup the resource, and
* attempt to find the matching vdev.
*/
if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
&resource) != 0 ||
nvlist_lookup_string(resource, FM_FMRI_SCHEME,
&scheme) != 0)
continue;
if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
continue;
if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
&pool_guid) != 0)
continue;
if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
&vdev_guid) != 0) {
if (is_repair)
vdev_guid = 0;
else
continue;
}
if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
&vdev)) == NULL)
continue;
aux = VDEV_AUX_ERR_EXCEEDED;
}
if (vdev_guid == 0) {
/*
* For pool-level repair events, clear the entire pool.
*/
fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
zpool_get_name(zhp));
(void) zpool_clear(zhp, NULL, NULL);
zpool_close(zhp);
continue;
}
/*
* If this is a repair event, then mark the vdev as repaired and
* continue.
*/
if (is_repair) {
repair_done = 1;
fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
zpool_get_name(zhp), vdev_guid);
(void) zpool_vdev_clear(zhp, vdev_guid);
zpool_close(zhp);
continue;
}
/*
* Actively fault the device if needed.
*/
if (fault_device)
(void) zpool_vdev_fault(zhp, vdev_guid, aux);
if (degrade_device)
(void) zpool_vdev_degrade(zhp, vdev_guid, aux);
if (fault_device || degrade_device)
fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
fault_device ? "fault" : "degrade", vdev_guid,
zpool_get_name(zhp));
/*
* Attempt to substitute a hot spare.
*/
(void) replace_with_spare(hdl, zhp, vdev);
zpool_close(zhp);
}
if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
fmd_case_uuresolved(hdl, uuid);
}
static const fmd_hdl_ops_t fmd_ops = {
zfs_retire_recv, /* fmdo_recv */
NULL, /* fmdo_timeout */
NULL, /* fmdo_close */
NULL, /* fmdo_stats */
NULL, /* fmdo_gc */
};
static const fmd_prop_t fmd_props[] = {
{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
{ NULL, 0, NULL }
};
static const fmd_hdl_info_t fmd_info = {
"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};
void
_zfs_retire_init(fmd_hdl_t *hdl)
{
zfs_retire_data_t *zdp;
libzfs_handle_t *zhdl;
if ((zhdl = libzfs_init()) == NULL)
return;
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
libzfs_fini(zhdl);
return;
}
zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
zdp->zrd_hdl = zhdl;
fmd_hdl_setspecific(hdl, zdp);
}
void
_zfs_retire_fini(fmd_hdl_t *hdl)
{
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
if (zdp != NULL) {
zfs_retire_clear_data(hdl, zdp);
libzfs_fini(zdp->zrd_hdl);
fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
}
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am b/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am
index c65b43fb027e..812558cf6d0f 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am
@@ -1,55 +1,57 @@
zedconfdir = $(sysconfdir)/zfs/zed.d
dist_zedconf_DATA = \
%D%/zed-functions.sh \
%D%/zed.rc
zedexecdir = $(zfsexecdir)/zed.d
dist_zedexec_SCRIPTS = \
%D%/all-debug.sh \
%D%/all-syslog.sh \
%D%/data-notify.sh \
%D%/generic-notify.sh \
%D%/pool_import-led.sh \
%D%/resilver_finish-notify.sh \
%D%/resilver_finish-start-scrub.sh \
%D%/scrub_finish-notify.sh \
%D%/statechange-led.sh \
%D%/statechange-notify.sh \
+ %D%/statechange-slot_off.sh \
%D%/trim_finish-notify.sh \
%D%/vdev_attach-led.sh \
%D%/vdev_clear-led.sh
nodist_zedexec_SCRIPTS = \
%D%/history_event-zfs-list-cacher.sh
SUBSTFILES += $(nodist_zedexec_SCRIPTS)
zedconfdefaults = \
all-syslog.sh \
data-notify.sh \
history_event-zfs-list-cacher.sh \
pool_import-led.sh \
resilver_finish-notify.sh \
resilver_finish-start-scrub.sh \
scrub_finish-notify.sh \
statechange-led.sh \
statechange-notify.sh \
+ statechange-slot_off.sh \
vdev_attach-led.sh \
vdev_clear-led.sh
dist_noinst_DATA += %D%/README
INSTALL_DATA_HOOKS += zed-install-data-hook
zed-install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(zedconfdir)"
set -x; for f in $(zedconfdefaults); do \
[ -f "$(DESTDIR)$(zedconfdir)/$${f}" ] ||\
[ -L "$(DESTDIR)$(zedconfdir)/$${f}" ] || \
$(LN_S) "$(zedexecdir)/$${f}" "$(DESTDIR)$(zedconfdir)"; \
done
SHELLCHECKSCRIPTS += $(dist_zedconf_DATA) $(dist_zedexec_SCRIPTS) $(nodist_zedexec_SCRIPTS)
$(call SHELLCHECK_OPTS,$(dist_zedconf_DATA) $(dist_zedexec_SCRIPTS) $(nodist_zedexec_SCRIPTS)): SHELLCHECK_SHELL = sh
# False positive: 1>&"${ZED_FLOCK_FD}" looks suspiciously similar to a >&filename bash extension
$(call SHELLCHECK_OPTS,$(dist_zedconf_DATA) $(dist_zedexec_SCRIPTS) $(nodist_zedexec_SCRIPTS)): CHECKBASHISMS_IGNORE = -e 'should be >word 2>&1' -e '&"$${ZED_FLOCK_FD}"'
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh
new file mode 100755
index 000000000000..150012abe71a
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+# shellcheck disable=SC3014,SC2154,SC2086,SC2034
+#
+# Turn off a disk's enclosure slot if it becomes FAULTED.
+#
+# Bad SCSI disks can often "disappear and reappear", causing all sorts of
+# chaos as they flip between FAULTED and ONLINE. If
+# ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT is set in zed.rc, and the disk gets
+# FAULTED, then power down the slot via sysfs:
+#
+# /sys/class/enclosure/<enclosure>/<slot>/power_status
+#
+# We assume the user will be responsible for turning the slot back on again.
+#
+# Note that this script requires that your enclosure be supported by the
+# Linux SCSI Enclosure services (SES) driver. The script will do nothing
+# if you have no enclosure, or if your enclosure isn't supported.
+#
+# Exit codes:
+# 0: slot successfully powered off
+# 1: enclosure not available
+# 2: ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT disabled
+# 3: vdev was not FAULTED
+# 4: The enclosure sysfs path passed from ZFS does not exist
+# 5: Enclosure slot didn't actually turn off after we told it to
+
+[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
+. "${ZED_ZEDLET_DIR}/zed-functions.sh"
+
+if [ ! -d /sys/class/enclosure ] ; then
+ # No JBOD enclosure or NVMe slots
+ exit 1
+fi
+
+if [ "${ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT}" != "1" ] ; then
+ exit 2
+fi
+
+if [ "$ZEVENT_VDEV_STATE_STR" != "FAULTED" ] ; then
+ exit 3
+fi
+
+if [ ! -f "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status" ] ; then
+ exit 4
+fi
+
+# Turn off the slot and wait for sysfs to report that the slot is off.
+# It can take ~400ms on some enclosures and multiple retries may be needed.
+for i in $(seq 1 20) ; do
+ echo "off" | tee "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status"
+
+ for j in $(seq 1 5) ; do
+ if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" == "off" ] ; then
+ break 2
+ fi
+ sleep 0.1
+ done
+done
+
+if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" != "off" ] ; then
+ exit 5
+fi
+
+zed_log_msg "powered down slot $ZEVENT_VDEV_ENC_SYSFS_PATH for $ZEVENT_VDEV_PATH"
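+
+# Illustrative usage only (hypothetical enclosure path; not part of the
+# script): query and change a slot's power state by hand.
+#
+# cat /sys/class/enclosure/0:0:0:0/Slot01/power_status
+# echo "off" > /sys/class/enclosure/0:0:0:0/Slot01/power_status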
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
index c55a70c79f75..78dc1afc7b15 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
@@ -1,144 +1,149 @@
##
# zed.rc – ZEDLET configuration.
##
# shellcheck disable=SC2034
##
# Absolute path to the debug output file.
#
#ZED_DEBUG_LOG="/tmp/zed.debug.log"
##
# Email address of the zpool administrator for receipt of notifications;
# multiple addresses can be specified if they are delimited by whitespace.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
# Enabled by default; comment to disable.
#
ZED_EMAIL_ADDR="root"
##
# Name or path of executable responsible for sending notifications via email;
# the mail program must be capable of reading a message body from stdin.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
#
#ZED_EMAIL_PROG="mail"
##
# Command-line options for ZED_EMAIL_PROG.
# The string @ADDRESS@ will be replaced with the recipient email address(es).
# The string @SUBJECT@ will be replaced with the notification subject;
# this should be protected with quotes to prevent word-splitting.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
# If @SUBJECT@ is omitted here, a "Subject: ..." header will be added to the notification
#
#ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@"
##
# Default directory for zed lock files.
#
#ZED_LOCKDIR="/var/lock"
##
# Minimum number of seconds between notifications for a similar event.
#
#ZED_NOTIFY_INTERVAL_SECS=3600
##
# Notification verbosity.
# If set to 0, suppress notification if the pool is healthy.
# If set to 1, send notification regardless of pool health.
#
#ZED_NOTIFY_VERBOSE=0
##
# Send notifications for 'ereport.fs.zfs.data' events.
# Disabled by default; any non-empty value will enable the feature.
#
#ZED_NOTIFY_DATA=
##
# Pushbullet access token.
# This grants full access to your account -- protect it accordingly!
# <https://www.pushbullet.com/get-started>
# <https://www.pushbullet.com/account>
# Disabled by default; uncomment to enable.
#
#ZED_PUSHBULLET_ACCESS_TOKEN=""
##
# Pushbullet channel tag for push notification feeds that can be subscribed to.
# <https://www.pushbullet.com/my-channel>
# If not defined, push notifications will instead be sent to all devices
# associated with the account specified by the access token.
# Disabled by default; uncomment to enable.
#
#ZED_PUSHBULLET_CHANNEL_TAG=""
##
# Slack Webhook URL.
# This allows posting to the given channel and includes an access token.
# <https://api.slack.com/incoming-webhooks>
# Disabled by default; uncomment to enable.
#
#ZED_SLACK_WEBHOOK_URL=""
##
# Pushover token.
# This defines the application from which the notification will be sent.
# <https://pushover.net/api#registration>
# Disabled by default; uncomment to enable.
# ZED_PUSHOVER_USER, below, must also be configured.
#
#ZED_PUSHOVER_TOKEN=""
##
# Pushover user key.
# This defines which user or group will receive Pushover notifications.
# <https://pushover.net/api#identifiers>
# Disabled by default; uncomment to enable.
# ZED_PUSHOVER_TOKEN, above, must also be configured.
#ZED_PUSHOVER_USER=""
##
# Default directory for zed state files.
#
#ZED_RUNDIR="/var/run"
##
# Turn on/off enclosure LEDs when drives get DEGRADED/FAULTED. This works
# for device-mapper and multipath devices, as well as JBOD enclosures and
# NVMe PCI drives (assuming they're supported by Linux in sysfs).
#
ZED_USE_ENCLOSURE_LEDS=1
##
# Run a scrub after every resilver
# Disabled by default; 1 to enable and 0 to disable.
#ZED_SCRUB_AFTER_RESILVER=0
##
# The syslog priority (e.g., specified as a "facility.level" pair).
#
#ZED_SYSLOG_PRIORITY="daemon.notice"
##
# The syslog tag for marking zed events.
#
#ZED_SYSLOG_TAG="zed"
##
# Which set of event subclasses to log
# By default, events from all subclasses are logged.
# If ZED_SYSLOG_SUBCLASS_INCLUDE is set, only subclasses
# matching the pattern are logged. Use the pipe symbol (|)
# or shell wildcards (*, ?) to match multiple subclasses.
# Otherwise, if ZED_SYSLOG_SUBCLASS_EXCLUDE is set, the
# matching subclasses are excluded from logging.
#ZED_SYSLOG_SUBCLASS_INCLUDE="checksum|scrub_*|vdev.*"
ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
##
# Use GUIDs instead of names when logging pools and vdevs
# Disabled by default; 1 to enable and 0 to disable.
#ZED_SYSLOG_DISPLAY_GUIDS=1
+##
+# Power off the drive's slot in the enclosure if it becomes FAULTED. This can
+# help silence misbehaving drives. This assumes your drive enclosure fully
+# supports slot power control via sysfs.
+#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT=1
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 10a3b5b14fc9..6d0dae8d8b05 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -1,11179 +1,11191 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, Klara Inc.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "statcommon.h"
libzfs_handle_t *g_zfs;
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_checkpoint(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
static zpool_compat_status_t zpool_do_load_compat(
const char *, boolean_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_CHECKPOINT,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_REPLACE,
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
HELP_RESILVER,
HELP_TRIM,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostats"
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
NULL},
};
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just use highbit64(a).
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
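/*
* Worked example (illustrative): highbit64() returns the 1-based
* position of the highest set bit, so with only IOS_RQ_HISTO_M set:
*
* IOS_HISTO_IDX(IOS_RQ_HISTO_M)
* == highbit64(1ULL << IOS_RQ_HISTO) - 1
* == (IOS_RQ_HISTO + 1) - 1 == IOS_RQ_HISTO
*/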
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "version", zpool_do_version, HELP_VERSION },
{ NULL },
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
{ "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ "trim", zpool_do_trim, HELP_TRIM },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
#define VDEV_ALLOC_CLASS_LOGS "logs"
static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [-nF] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [[-n] interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_OFFLINE:
return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
case HELP_ONLINE:
return (gettext("\tonline [-e] <pool> <device> ...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-fsw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
return (gettext("\tstatus [-c [script1,script2,...]] "
"[-igLpPstvxD] [-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool>\n"
"\tset <vdev_property=value> <pool> <vdev>\n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
default:
__builtin_unreachable();
}
}
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
uint_t children = 0;
nvlist_t **child;
uint_t i;
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (children == 0) {
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
VDEV_NAME_PATH);
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
strcmp(path, VDEV_TYPE_HOLE) != 0)
fnvlist_add_boolean(res, path);
free(path);
return;
}
for (i = 0; i < children; i++) {
zpool_collect_leaves(zhp, child[i], res);
}
}
/*
* Callback routine that will print out a pool property value.
*/
static int
print_pool_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Callback routine that will print out a vdev property value.
*/
static int
print_vdev_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
if (vdev_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (vdev_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", vdev_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp, "%s",
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
if (current_prop_type == ZFS_TYPE_POOL) {
(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
(void) fprintf(fp, "\t%-19s ", "feature@...");
(void) fprintf(fp, "YES "
"disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties "
"must be appended with a feature name.\n"
"See zpool-features(7).\n"));
} else if (current_prop_type == ZFS_TYPE_VDEV) {
(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
}
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -u Uninitialize. Clears initialization state.
* -w Wait. Blocks until initializing has completed.
*/
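/*
* Illustrative invocations:
*
* zpool initialize tank initialize every leaf vdev
* zpool initialize -w tank sda initialize one device and wait
* zpool initialize -s tank suspend; restart later with no flags
*/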
int
zpool_do_initialize(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"uninit", no_argument, NULL, 'u'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "csuw", long_options,
NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_CANCEL;
break;
case 's':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'u':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_UNINIT) {
(void) fprintf(stderr, gettext("-u cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_UNINIT;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c, -s"
"or -u\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
} else {
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
zpool_close(zhp);
return (err);
}
/*
* Print a pool vdev config for dry runs.
*/
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
const char *match, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
boolean_t printed = B_FALSE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
return;
}
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
const char *class = "";
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole == B_TRUE) {
continue;
}
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
class = VDEV_ALLOC_BIAS_LOG;
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
if (strcmp(match, class) != 0)
continue;
if (!printed && name != NULL) {
(void) printf("\t%*s%s\n", indent, "", name);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, "",
name_flags);
free(vname);
}
}
/*
* Print the list of l2cache devices for dry runs.
*/
static void
print_cache_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "cache");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
/*
* Print the list of spares for dry runs.
*/
static void
print_spare_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "spares");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
nvlist_t *proplist;
const char *normnm;
const char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
const char *cname =
zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
(!zpool_prop_feature(propname) &&
!zpool_prop_vdev(propname))) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool or vdev property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
/*
* if version is specified, only "legacy" compatibility
* may be requested
*/
if ((prop == ZPOOL_PROP_COMPATIBILITY &&
strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
nvlist_exists(proplist, cname) &&
strcmp(fnvlist_lookup_string(proplist, cname),
ZPOOL_COMPAT_LEGACY) != 0)) {
(void) fprintf(stderr, gettext("when 'version' is "
"specified, the 'compatibility' feature may only "
"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
return (2);
}
if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
zfs_prop_t fsprop = zfs_name_to_prop(propname);
if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
B_FALSE)) {
normnm = zfs_prop_to_name(fsprop);
} else if (zfs_prop_user(propname) ||
zfs_prop_userquota(propname)) {
normnm = propname;
} else {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid filesystem property\n"), propname);
return (2);
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, const char *propval,
nvlist_t **props)
{
const char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
*
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by make_root_vdev(), which constructs the nvlist needed to pass to
* libzfs.
*/
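/*
* Illustrative invocations:
*
* zpool add tank mirror sdc sdd add a mirror vdev
* zpool add -n tank cache sde dry run: print the resulting layout
*/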
int
zpool_do_add(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified, use the "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to make_root_vdev for processing */
nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child, **sparechild;
uint_t l2children, sparechildren, c;
char *vname;
boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
name_flags | VDEV_NAME_TYPE_ID);
print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
/* print other classes: 'dedup', 'special', and 'log' */
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", poolnvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
}
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", poolnvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
}
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
/* And finally the spares */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
hadspare = B_TRUE;
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
if (!hadspare)
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev(s) from the pool.
*
* -n Do not actually remove; print the estimated amount of memory that
* will be used by the indirect mapping table after the removal.
* -p Used with -n, print the size estimate as exact, parsable values.
* -s Stop (cancel) an in-progress removal.
* -w Wait for the removal to complete before returning.
*/
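/*
* Illustrative invocations (pool and vdev names are examples only):
*
* # zpool remove -n tank mirror-1 (estimate mapping memory)
* # zpool remove -w tank da3 (remove and wait for completion)
* # zpool remove -s tank (cancel an active removal)
*/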
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
boolean_t stop = B_FALSE;
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if (stop && noop) {
zpool_close(zhp);
(void) fprintf(stderr, gettext("stop request ignored\n"));
return (0);
}
if (stop) {
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
usage(B_FALSE);
}
for (i = 1; i < argc; i++) {
if (noop) {
uint64_t size;
if (zpool_vdev_indirect_size(zhp, argv[i],
&size) != 0) {
ret = 1;
break;
}
if (parsable) {
(void) printf("%s %llu\n",
argv[i], (unsigned long long)size);
} else {
char valstr[32];
zfs_nicenum(size, valstr,
sizeof (valstr));
(void) printf("Memory that will be "
"used after removing %s: %s\n",
argv[i], valstr);
}
} else {
if (zpool_vdev_remove(zhp, argv[i]) != 0)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
return (ret);
}
/*
* Return 1 if a vdev is active (being used in a pool).
* Return 0 if a vdev is inactive (offlined, faulted, or not in an active
* pool).
*
* This is useful for checking whether a disk in an active pool has been
* offlined or faulted.
*/
static int
vdev_is_active(char *vdev_path)
{
int fd;
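/*
* Assumption: the platform enforces exclusive block-device opens (as
* Linux does), so the O_EXCL open below fails while the device is
* still held open by an imported pool.
*/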
fd = open(vdev_path, O_EXCL);
if (fd < 0) {
return (1); /* can't open O_EXCL - disk is active */
}
close(fd);
return (0); /* disk is inactive in the pool */
}
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for vdevs which are members of
* exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
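/*
* Illustrative invocations (device paths are examples only):
*
* # zpool labelclear /dev/da1p1
* # zpool labelclear -f /dev/da1p1 (member of an exported or foreign pool)
*/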
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
(void) strlcpy(vdev, argv[0], sizeof (vdev));
/*
* If an absolute path was given and cannot be opened, quit.
* Otherwise, if the provided vdev name doesn't resolve to a file,
* try prepending the expected disk paths and partition numbers.
*/
if ((fd = open(vdev, O_RDWR)) < 0) {
int error;
if (vdev[0] == '/') {
(void) fprintf(stderr, gettext("failed to open "
"%s: %s\n"), vdev, strerror(errno));
return (1);
}
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
if (errno == ENOENT) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try "
"specifying absolute path instead\n"),
argv[0]);
return (1);
}
(void) fprintf(stderr, gettext("failed to open %s:"
" %s\n"), vdev, strerror(errno));
return (1);
}
}
/*
* Flush all dirty pages for the block device. This should not be
* fatal when the device does not support BLKFLSBUF as would be the
* case for a file vdev.
*/
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0) {
(void) fprintf(stderr,
gettext("failed to read label from %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
/*
* We allow the user to call 'zpool labelclear -f' on a disk that
* has been offlined from an active pool. We can check whether the
* disk is still held open (active) by calling vdev_is_active().
*/
if (force && !vdev_is_active(vdev))
break;
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\""),
vdev, zpool_pool_state_to_name(state), name);
if (force) {
(void) fprintf(stderr, gettext(
". Offline the disk first to clear its label."));
}
printf("\n");
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
* Once we get the nvlist back from make_root_vdev(), we either print out the
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
*/
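/*
* Illustrative invocations (pool, device, and mountpoint names are
* examples only):
*
* # zpool create tank mirror da0 da1
* # zpool create -n -o ashift=12 tank raidz da0 da1 da2
* # zpool create -m /export/tank -O compression=on tank da0
*/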
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_pool_features = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *compat = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_pool_features = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 10);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_pool_features = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
if (zpool_name_to_prop(optarg) ==
ZPOOL_PROP_COMPATIBILITY)
compat = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to make_root_vdev for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
* '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
int count = 0;
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message and run
* through all the vdevs in the list and print out in an
* appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
print_vdev_tree(NULL, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
print_cache_list(nvroot, 0);
print_spare_list(nvroot, 0);
ret = 0;
} else {
/*
* Load in the feature set.
* Note: if the compatibility property is not given, compat will be
* NULL, which means 'all features'.
*/
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
goto errout;
/*
* props contains the list of features to enable.
* For each feature:
* - remove it if feature@name=disabled
* - leave it there if feature@name=enabled
* - add it if:
* - enable_pool_features (i.e. no '-d' or '-o version')
* - it's supported by the kernel module
* - it's in the requested feature set
* - warn if it's enabled but not in compat
*/
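/*
* For example (feature name is illustrative):
* '-o feature@async_destroy=disabled' drops that entry from props,
* while with no 'feature@' options at all, every feature that is
* module-supported and in the requested set is added as
* feature@name=enabled.
*/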
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
const char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval,
ZFS_FEATURE_DISABLED) == 0) {
(void) nvlist_remove_all(props,
propname);
} else if (strcmp(propval,
ZFS_FEATURE_ENABLED) == 0 &&
!requested_features[i]) {
(void) fprintf(stderr, gettext(
"Warning: feature \"%s\" enabled "
"but is not in specified "
"'compatibility' feature set.\n"),
feat->fi_uname);
}
} else if (
enable_pool_features &&
feat->fi_zfs_mod_supported &&
requested_features[i]) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0) {
ret = zfs_share(pool, NULL);
zfs_commit_shares(NULL);
}
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
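/*
* Illustrative invocation (pool name is an example only):
*
* # zpool destroy -f tank (force-unmount datasets, then destroy)
*/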
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the export */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
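/* Per-pool export options, passed to zpool_export_one() by for_each_pool() */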
typedef struct export_cbdata {
boolean_t force;
boolean_t hardforce;
} export_cbdata_t;
/*
* Export one pool
*/
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
if (zpool_disable_datasets(zhp, cb->force) != 0)
return (1);
/* The history must be logged as part of the export */
log_history = B_FALSE;
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
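/*
* Illustrative invocations (pool name is an example only):
*
* # zpool export tank
* # zpool export -a (export every imported pool)
*/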
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
argc -= optind;
argv += optind;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_export_one, &cb);
return (ret);
}
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
static const char *const subtypes[] =
{ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
max = MAX(strlen(name) + depth, max);
free(name);
nvlist_t **child;
uint_t children;
for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
if (nvlist_lookup_nvlist_array(nv, subtypes[i],
&child, &children) == 0)
for (uint_t c = 0; c < children; ++c)
max = MAX(max_width(zhp, child[c], depth + 2,
max, name_flags), max);
return (max);
}
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_status;
boolean_t cb_print_slow_ios;
boolean_t cb_print_vdev_init;
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
} status_cbdata_t;
/* Return B_TRUE if the string is NULL, empty, or all whitespace. */
static boolean_t
is_blank_str(const char *str)
{
for (; str != NULL && *str != '\0'; ++str)
if (!isblank(*str))
return (B_FALSE);
return (B_TRUE);
}
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
{
vdev_cmd_data_t *data;
int i, j;
const char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (val == NULL || is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
fputs(" ", stdout);
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
fputs(" ", stdout);
val = data->lines[j];
fputs(val ?: "", stdout);
}
break;
}
}
/*
* Print vdev initialization status for leaves
*/
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_initialize_action_time;
int initialize_pct = 100;
if (vs->vs_initialize_state !=
VDEV_INITIALIZE_COMPLETE) {
initialize_pct = (vs->vs_initialize_bytes_done *
100 / (vs->vs_initialize_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_initialize_state) {
case VDEV_INITIALIZE_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_INITIALIZE_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_INITIALIZE_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% initialized%s)"),
initialize_pct, zbuf);
} else {
(void) printf(gettext(" (uninitialized)"));
}
} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) printf(gettext(" (initializing)"));
}
}
/*
* Print vdev TRIM status for leaves
*/
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_trim_action_time;
int trim_pct = 100;
if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
trim_pct = (vs->vs_trim_bytes_done *
100 / (vs->vs_trim_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_trim_state) {
case VDEV_TRIM_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_TRIM_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_TRIM_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% trimmed%s)"),
trim_pct, zbuf);
} else if (vs->vs_trim_notsup) {
(void) printf(gettext(" (trim unsupported)"));
} else {
(void) printf(gettext(" (untrimmed)"));
}
} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
(void) printf(gettext(" (trimming)"));
}
}
/*
* Return the color associated with a health string. This includes returning
* NULL for no color change.
*/
static const char *
health_str_to_color(const char *health)
{
if (strcmp(health, gettext("FAULTED")) == 0 ||
strcmp(health, gettext("SUSPENDED")) == 0 ||
strcmp(health, gettext("UNAVAIL")) == 0) {
return (ANSI_RED);
}
if (strcmp(health, gettext("OFFLINE")) == 0 ||
strcmp(health, gettext("DEGRADED")) == 0 ||
strcmp(health, gettext("REMOVED")) == 0) {
return (ANSI_YELLOW);
}
return (NULL);
}
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
nvlist_t **child, *root;
uint_t c, i, vsc, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
const char *type;
const char *path = NULL;
const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
* For hot spares, we use the terms 'INUSE' and 'AVAIL' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = gettext("INUSE");
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = gettext("AVAIL");
}
printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
if (vs->vs_read_errors)
rcolor = ANSI_RED;
if (vs->vs_write_errors)
wcolor = ANSI_RED;
if (vs->vs_checksum_errors)
ccolor = ANSI_RED;
if (cb->cb_literal) {
fputc(' ', stdout);
printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
fputc(' ', stdout);
printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
fputc(' ', stdout);
printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
fputc(' ', stdout);
printf_color(rcolor, "%5s", rbuf);
fputc(' ', stdout);
printf_color(wcolor, "%5s", wbuf);
fputc(' ', stdout);
printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
/* Only leaf vdevs have slow IOs */
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
else
printf(" %5s", rbuf);
}
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" %s %s", gettext("was"), path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
color_start(ANSI_RED);
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ASHIFT_TOO_BIG:
(void) printf(gettext("unsupported minimum blocksize"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
color_end();
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
(void) printf(
gettext(" block size: %dB configured, %dB native"),
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
}
if (vs->vs_scan_removing != 0) {
(void) printf(gettext(" (removing)"));
} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
(void) printf(gettext(" (non-allocating)"));
}
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
/*
* If you force fault a drive that's resilvering, its scan stats can
* get frozen in time, giving the false impression that it's
* being resilvered. That's why we check the state to see if the vdev
* is healthy before reporting "resilvering" or "repairing".
*/
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
vs->vs_state == VDEV_STATE_HEALTHY) {
if (vs->vs_scan_processed != 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
} else if (vs->vs_resilver_deferred) {
(void) printf(gettext(" (awaiting resilver)"));
}
}
/* The top-level vdevs have the rebuild stats */
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
if (vs->vs_rebuild_processed != 0) {
(void) printf(gettext(" (resilvering)"));
}
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
/* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
/* Only print normal classes here */
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
/* Provide vdev_rebuild_stats to children if available */
if (vrs == NULL) {
(void) nvlist_lookup_uint64_array(nv,
ZPOOL_CONFIG_REBUILD_STATS,
(uint64_t **)&vrs, &i);
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare, vrs);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
const char *type;
char *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print specialized class vdevs.
*
* These are recorded as top level vdevs in the main pool child array
* but with "is_log" set to 1 or an "alloc_bias" string. We use either
* print_status_config() or print_import_config() to print the top level
* class vdevs then any of their children (eg mirrored slogs) are printed
* recursively - which works because only the top level vdev is marked.
*/
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class)
{
uint_t c, children;
nvlist_t **child;
boolean_t printed = B_FALSE;
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
bias = (char *)VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
(void) printf("\t%s\t\n", gettext(class));
printed = B_TRUE;
}
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE, NULL);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
/*
* Display the status for the given pool.
*/
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
const char *name;
uint64_t guid;
uint64_t hostid = 0;
const char *msgid;
const char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
const char *comment;
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices contains"
" corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(
gettext(" status: The pool data is corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n\t"
"(Note that they may be intentionally disabled "
"if the\n\t'compatibility' property is set.)\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
"the file(s) indicated by the 'compatibility'\n"
"property.\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"requested by the 'compatibility' property.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n\tbe safely imported "
"when the system hostid is not set.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
"by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
"be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices were "
"being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric identifier, "
"though\n\tsome features will not be available "
"without an explicit 'zpool upgrade'.\n"));
} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric\n\tidentifier, "
"though the file(s) indicated by its "
"'compatibility'\n\tproperty cannot be parsed at "
"this time.\n"));
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier and\n\tthe '-f' flag.\n"));
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext(" action: The pool can "
"be imported using its name or numeric "
"identifier,\n\thowever there is a compat"
"ibility issue which should be corrected"
"\n\tby running 'zpool scrub'\n"));
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext(" action: The pool can"
"not be imported with this version of ZFS "
"due to\n\tan active asynchronous destroy. "
"Revert to an earlier version\n\tand "
"allow the destroy to complete before "
"updating.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted datasets contain an on-disk "
"incompatibility, which\n\tneeds to be "
"corrected. Backup these datasets to new "
"encrypted datasets\n\tand destroy the "
"old ones.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted snapshots and bookmarks contain "
"an on-disk\n\tincompatibility. This may "
"cause on-disk corruption if they are used"
"\n\twith 'zfs recv'. To correct the "
"issue, enable the bookmark_v2 feature.\n\t"
"No additional action is needed if there "
"are no encrypted snapshots or\n\t"
"bookmarks. If preserving the encrypted "
"snapshots and bookmarks is\n\trequired, "
"use a non-raw send to backup and restore "
"them. Alternately,\n\tthey may be removed"
" to resolve the incompatibility.\n"));
break;
default:
/*
* All errata must contain an action message.
*/
assert(0);
}
} else {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext(" action: The pool can be imported "
"despite missing or damaged devices. The\n\tfault "
"tolerance of the pool may be compromised if imported.\n"));
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" action: The pool cannot be "
"imported. Access the pool on a system running "
"newer\n\tsoftware, or recreate the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported. Access the pool on a system that "
"supports\n\tthe required feature(s), or recreate "
"the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported in read-write mode. Import the pool "
"with\n"
"\t\"-o readonly=on\", access the pool on a system "
"that supports the\n\trequired feature(s), or "
"recreate the pool from backup.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" action: The pool cannot be "
"imported. Attach the missing\n\tdevices and try "
"again.\n"));
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext(" action: The pool must be "
"exported from %s (hostid=%"PRIx64")\n\tbefore it "
"can be safely imported.\n"), hostname, hostid);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" action: Set a unique system "
"hostid with the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext(" action: The pool cannot be "
"imported due to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
(void) printf(gettext("comment: %s\n"), comment);
/*
* If the state is "closed" or "can't open", and the aux state
* is "corrupt data":
*/
if (((vs->vs_state == VDEV_STATE_CLOSED) ||
(vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
(vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\tThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"));
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\tThe pool may be active on "
"another system, but can be imported using\n\t"
"the '-f' flag.\n"));
}
if (msgid != NULL) {
(void) printf(gettext(
" see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to "
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
return (0);
}
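/*
* Determine whether the import must be forced ('-f'): either the pool was
* not cleanly exported and was last written by a different host, or MMP
* indicates the pool may still be active on another system.
*/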
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags)
{
int ret = 0;
+ int ms_status = 0;
zpool_handle_t *zhp;
const char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%"PRIx64")\nExport the pool on the other "
"system, then run 'zpool import'.\n"),
name, hostname, hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
const char *hostname = "<unknown>";
time_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%"PRIx64") at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
hostid, ctime(&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS &&
zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
ret = 1;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
- !(flags & ZFS_IMPORT_ONLY) &&
- zpool_enable_datasets(zhp, mntopts, 0) != 0) {
- zpool_close(zhp);
- return (1);
+ !(flags & ZFS_IMPORT_ONLY)) {
+ ms_status = zpool_enable_datasets(zhp, mntopts, 0);
+ if (ms_status == EZFS_SHAREFAILED) {
+ (void) fprintf(stderr, gettext("Import was "
+ "successful, but unable to share some datasets"));
+ } else if (ms_status == EZFS_MOUNTFAILED) {
+ (void) fprintf(stderr, gettext("Import was "
+ "successful, but unable to mount some datasets"));
+ }
}
zpool_close(zhp);
return (ret);
}
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name,
boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) fputc('\n', stdout);
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
/*
* If we're importing from cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* to invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
const char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
} target_exists_args_t;
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
target_exists_args_t *args = data;
nvlist_t *config = zpool_get_config(zhp, NULL);
int found = 0;
if (config == NULL)
return (0);
if (args->poolname != NULL) {
const char *pool_name;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&pool_name) == 0);
if (strcmp(pool_name, args->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0);
if (pool_guid == args->poolguid)
found = 1;
}
zpool_close(zhp);
return (found);
}
/*
* zpool checkpoint <pool>
* checkpoint --discard <pool>
*
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
* Checkpoints the specified pool by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
*/
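/*
* Illustrative invocations (pool name is an example only):
*
* # zpool checkpoint tank
* # zpool checkpoint -d -w tank (discard the checkpoint and wait)
*/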
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
/* As a special case, check for use of '/' in the name */
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("'zpool checkpoint' "
"doesn't work on datasets. To save the state "
"of a dataset from a specific point in time "
"please use 'zfs snapshot'\n"));
return (1);
}
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
return (err);
}
#define CHECKPOINT_OPT 1024
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices. If importing from a cachefile config fails, then
* fall back to searching for devices only in the directories that
* exist in the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
* -D Scan for previously destroyed pools, or import all or only the
* specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
* vdevs in the FAULTED state. In other words, it does verbatim
* import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
* -T Specify a starting txg to use for import. This is an
* intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
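* E.g. (illustrative) '-o readonly=on' sets a pool property, while
* '-o ro' is passed through as temporary mount options.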
*
* -s Scan using the default search path; the libblkid cache will
* not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
* The import command scans for pools to import, and imports pools based on
* pool name or GUID. The pool can also be renamed as part of the import
* process.
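*
* For example (illustrative): 'zpool import' lists the pools available to
* import, 'zpool import -a' imports every pool found, and
* 'zpool import tank newtank' imports the pool 'tank' under the name
* 'newtank'.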
*/
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
boolean_t pool_specified = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
struct option long_options[] = {
{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
long_options, NULL)) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case CHECKPOINT_OPT:
flags |= ZFS_IMPORT_CHECKPOINT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
* -D The above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
pool_specified = B_TRUE;
/*
* User specified a name or guid. Ensure it's unique.
*/
target_exists_args_t search = {searchname, searchguid};
pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
}
/*
* Check the environment for the preferred search path.
*/
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir, *tmp = NULL;
envdup = strdup(env);
for (dir = strtok_r(envdup, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp)) {
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = dir;
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
idata.policy = policy;
libpc_handle_t lpch = {
.lpc_lib_handle = g_zfs,
.lpc_ops = &libzfs_config_ops,
.lpc_printerr = B_TRUE
};
pools = zpool_search_import(&lpch, &idata);
if (pools != NULL && pool_exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && pool_exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
free(searchdirs);
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
/*
* If we're using the cachefile and we failed to import, then
* fall back to scanning the directory for pools that match
* those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
* in the cachefile. If we need to fall back to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(&lpch, &idata);
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
}
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
free(searchdirs);
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
* -f (undocumented) force uberblock (and config including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
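* For example (illustrative): 'zpool sync' syncs every imported pool,
* while 'zpool sync tank' initiates a TXG sync only on 'tank'.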
*
*/
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_sync_one, &force);
return (ret);
}
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_namewidth;
int cb_iteration;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
vdev_cbdata_t cb_vdevs;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
{NULL}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2},
{"trim", 2}, {"rebuild", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
{NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
{"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
{NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {NULL}},
};
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
* Return the number of labels in a null-terminated name_and_columns_t
* array.
*
*/
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
* const char foo[] = {"bar", "baz", NULL}
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
[IOS_RQ_HISTO] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
/*
* Print the column labels, i.e.:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
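/*
* lowbit64(f) is the 1-indexed position of the lowest set bit, so each
* pass handles the lowest remaining flag and then clears it from f.
* E.g. (illustrative) with IOS_DEFAULT_M and IOS_LATENCY_M set, idx
* visits IOS_DEFAULT first, then IOS_LATENCY.
*/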
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width) / columns -
slen / columns);
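/*
* Worked example (illustrative): with column_width = 5 and a
* two-column label like "operations" (slen = 10), rw_column_width
* is 5*2 + 2 = 12 and text_start is 12/2 - 10/2 = 1, so the label
* prints one space in, roughly centered over its two data columns.
*/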
if (text_start < 0)
text_start = 0;
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
if (spaces_to_end < 0)
spaces_to_end = 0;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
* If the user specified "zpool status|iostat -c", then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
color_start(ANSI_BOLD);
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
color_end();
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
* Prints a size string (e.g. 120M) with the suffix ("M") colored
* by order of magnitude. Uses column_size to add padding.
*/
static void
print_stat_color(const char *statbuf, unsigned int column_size)
{
fputs(" ", stdout);
size_t len = strlen(statbuf);
while (len < column_size) {
fputc(' ', stdout);
column_size--;
}
if (*statbuf == '0') {
color_start(ANSI_GRAY);
fputc('0', stdout);
} else {
for (; *statbuf; statbuf++) {
if (*statbuf == 'K') color_start(ANSI_GREEN);
else if (*statbuf == 'M') color_start(ANSI_YELLOW);
else if (*statbuf == 'G') color_start(ANSI_RED);
else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
else if (*statbuf == 'E') color_start(ANSI_CYAN);
fputc(*statbuf, stdout);
if (--column_size <= 0)
break;
}
}
color_end();
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
print_stat_color(buf, column_size);
}
/*
* Calculate the default vdev stats
*
* Subtract oldvs from newvs, apply a scaling factor, and save the resulting
* stats into calcvs.
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
* to process. To make a single uint64_t look like an array, we set __data
* to the stat value and point data at it (data = &__data, count = 1). Then
* we can just use data and count.
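*
* For example (illustrative), a DATA_TYPE_UINT64 nvpair holding the value
* 7 is stored as __data = 7, data = &__data, count = 1, and is then
* processed exactly like a one-element uint64_t array.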
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
* Helper function to lookup a uint64_t array or uint64_t value and store its
* data as a stat_array. If the nvpair is a single uint64_t value, then we make
* it look like a one element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
* You must free the returned array after you are done with it with
* free_calc_stats().
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
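*
* A minimal usage sketch (mirroring the callers below):
*
*	struct stat_array *nva;
*
*	nva = calc_and_alloc_stats_ex(names, len, oldnv, newnv);
*	... read nva[i].data[j] ...
*	free_calc_stats(nva, len);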
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
/* Start at 512 (1 << 9) - req size should never be lower than this */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
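*
* Worked example (illustrative): histo[3] = 2 and histo[4] = 1 gives
* total = 2*12ns + 1*24ns = 48ns over count = 3 events, i.e. an
* average of 16ns.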
*/
if (histo[i] != 0) {
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
{
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (int i = 0; i < ARRAY_SIZE(names); i++) {
uint64_t val = nva[i].data[0];
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
static const char *const class_name[] = {
VDEV_ALLOC_BIAS_DEDUP,
VDEV_ALLOC_BIAS_SPECIAL,
VDEV_ALLOC_CLASS_LOGS
};
/*
* Print out all the statistics for the given vdev. This can be the toplevel
* configuration, or a child vdev visited recursively. If 'name' is NULL, then this
* is a verbose output, and we don't want to display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return (ret);
calcvs = safe_malloc(sizeof (*calcvs));
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
* Print the vdev name unless it's a histogram. Histograms
* display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, newnv);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
const char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
/*
* print normal top-level devices
*/
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* print all other top-level devices
*/
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
!cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0,
class_name[n]);
}
printf("\n");
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose &&
!cb->cb_vdevs.cb_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
/*
* Return the required length of the pool/vdev name column. The minimum
* allowed width and output formatting flags must be provided.
*/
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
nvlist_t *config, *nvroot;
int width = min_width;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
size_t poolname_len = strlen(zpool_get_name(zhp));
if (verbose == B_FALSE) {
width = MAX(poolname_len, min_width);
} else {
width = MAX(poolname_len,
max_width(zhp, nvroot, 0, min_width, flags));
}
}
return (width);
}
/*
* Parse the trailing arguments to get the 'interval' and 'count' values, if present.
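*
* For example (illustrative), "zpool iostat tank 2 5" yields interval = 2
* and count = 5, while "zpool iostat tank 2" yields interval = 2 and
* count = 0 (repeat until interrupted).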
*/
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
* Return the stat flags that are supported on all pools by both the module
* and zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
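*
* For example (illustrative), if one pool exports the extended latency
* stats but another does not, the IOS_LATENCY_M bit is ANDed out and only
* the stats common to every pool survive in *data.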
*/
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
/* Default stats are always supported, but for completeness.. */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
/*
* Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
uint64_t guid;
vdev_cbdata_t *cb = cb_data;
zpool_handle_t *zhp = zhp_data;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (0);
return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
}
/*
* Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_names for a second... */
tmp_name = cb->cb_names;
/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_names = tmp_name;
return (ret);
}
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
is_pool_cb, name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_vdevs.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
&cb->cb_vdevs)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
&cb->cb_vdevs)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
/*
* Floating point sleep(). Allows you to pass in a floating point value for
* seconds.
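* E.g. fsleep(1.5) sleeps for one and a half seconds.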
*/
static void
fsleep(float sec)
{
struct timespec req;
req.tv_sec = floor(sec);
req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
nanosleep(&req, NULL);
}
/*
* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
* if we were unable to determine its size.
*/
static int
terminal_height(void)
{
struct winsize win;
if (isatty(STDOUT_FILENO) == 0)
return (-1);
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
return (win.ws_row);
return (-1);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, (char *)"-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* print all the files and directories within directory */
while ((ent = readdir(dir)) != NULL) {
if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
dirpath, ent->d_name) >= sizeof (fullpath)) {
(void) fprintf(stderr,
gettext("internal error: "
"ZPOOL_SCRIPTS_PATH too large.\n"));
exit(1);
}
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(const char *subcommand)
{
char *dir, *sp, *tmp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
for (dir = strtok_r(sp, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp))
print_zpool_dir_scripts(dir);
free(sp);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 10,
* but may be as large as the terminal width - 42 so it still fits on one line.
* NOTE: 42 is the width of the default capacity/operations/bandwidth output
*/
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
int width, available_width;
/*
* get_namewidth() returns the maximum width of any name in that column
* for any pool/vdev/device line that will be output.
*/
width = get_namewidth(zhp, cb->cb_namewidth,
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
/*
* The width we are calculating is the width of the header and also the
* padding width for names that are less than maximum width. The stats
* take up 42 characters, so the width available for names is:
*/
available_width = get_columns() - 42;
/*
* If the maximum width fits on a screen, then great! Make everything
* line up by justifying all lines to the same width. If that max
* width is larger than what's available, the name plus stats won't fit
* on one line, and justifying to that width would cause every line to
* wrap on the screen. We only want lines with long names to wrap.
* Limit the padding to what won't wrap.
*/
if (width > available_width)
width = available_width;
/*
* And regardless of whatever the screen width is (get_columns can
* return 0 if the width is not known or less than 42 for a narrow
* terminal) have the width be a minimum of 10.
*/
if (width < 10)
width = 10;
/* Save the calculated width */
cb->cb_namewidth = width;
return (0);
}
/*
* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
* -n Only print headers once
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
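*
* For example (illustrative), 'zpool iostat -v tank 5' prints per-vdev
* statistics for 'tank' every five seconds until interrupted.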
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
boolean_t headers_once = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'n':
headers_once = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
/* All the args are vdevs */
cb.cb_vdevs.cb_names = argv;
cb.cb_vdevs.cb_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
/* ...and the rest are vdev names */
cb.cb_vdevs.cb_names = argv + 1;
cb.cb_vdevs.cb_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdevs.cb_names_count != 0) {
/*
* If user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
&ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
* Histogram tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
* width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE,
get_namewidth_iostat, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
cb.cb_vdevs.cb_names_count,
cb.cb_vdevs.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* Check terminal size so we can print headers
* even when terminal window has its height
* changed.
*/
winheight = terminal_height();
/*
* Are we connected to TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
/*
* Print the header on the first iteration when we're not
* skipping it, whenever the skip and verbose modes differ, or
* periodically (once per terminal height) unless headers_once
* is set.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose) ||
(!headers_once &&
(cb.cb_iteration % winheight) == 0)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdevs.cb_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
/*
* Flush the output so that redirection to a file isn't buffered
* indefinitely.
*/
(void) fflush(stdout);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) fputs(" ", stdout);
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) fputs(header, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) fputc('\n', stdout);
}
/*
* Given a pool and a list of properties, print out all the properties according
* to the described layout. Used by zpool_do_list().
*/
static void
print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
const char *propstr;
boolean_t right_justify;
size_t width;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first) {
if (cb->cb_scripted)
(void) fputc('\t', stdout);
else
(void) fputs(" ", stdout);
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), NULL, cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
} else if (zfs_prop_user(pl->pl_user_prop) &&
zpool_get_userprop(zhp, pl->pl_user_prop, property,
sizeof (property), NULL) == 0) {
propstr = property;
} else {
propstr = "-";
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
(void) fputc('\n', stdout);
}
static void
print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
case ZPOOL_PROP_DEDUPRATIO:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
/* capacity value is in parts-per-10,000 (aka permyriad) */
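/* e.g. 4567 permyriad prints as "45.7%", or "45" in raw (-p) mode */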
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value / 100);
else
(void) snprintf(propval, sizeof (propval),
value < 1000 ? "%1.2f%%" : value < 10000 ?
"%2.1f%%" : "%3.0f%%", value / 100.0);
break;
case ZPOOL_PROP_HEALTH:
width = 8;
(void) strlcpy(propval, str, sizeof (propval));
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
/*
* print static default line per vdev
* not compatible with '-o' <proplist> option
*/
static void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
const char *dashes = "%-*s - - - - "
"- - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
const char *state;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) - depth), "");
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to the print_one_column()
* to indicate that the value is valid.
*/
if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
scripted, B_TRUE, format);
else
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_CHECKPOINT,
vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
scripted, B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, NULL, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format);
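/* Express capacity in parts-per-10,000 to match print_one_column() */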
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
scripted, toplevel, format);
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
B_TRUE, format);
(void) fputc('\n', stdout);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
/* list the normal vdevs first */
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
free(vname);
}
/* list the classes: 'logs', 'dedup', and 'special' */
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
const char *bias = NULL;
const char *type = NULL;
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) == 0 && islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_TRUE);
free(vname);
}
}
}
/*
* Generic callback function to list a pool.
*/
static int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
print_pool(zhp, cbp);
if (cbp->cb_verbose) {
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
}
return (0);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 9,
* but may be as large as needed.
*/
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cb = data;
int width;
width = get_namewidth(zhp, cb->cb_namewidth,
cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
if (width < 9)
width = 9;
cb->cb_namewidth = width;
return (0);
}
/*
* zpool list [-gHLpPv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
* "capacity,dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
* -v Verbose mode; also list space statistics for each vdev.
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as a health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
"capacity,dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
current_prop_type = ZFS_TYPE_POOL;
/* check options */
while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
cb.cb_namewidth = 8; /* 8 until precalc is avail */
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
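/* Peel off the optional trailing [interval [count]] operands */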
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (!cb.cb_scripted && (first || cb.cb_verbose)) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t rebuild = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:sw")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 's':
rebuild = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
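/* 'zpool replace <pool> <device>' replaces the device in place */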
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
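/* A non-default source means ashift was explicitly set on the pool */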
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
rebuild);
if (ret == 0 && wait)
ret = zpool_wait(zhp,
replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for replacing to complete before returning
*
* Replace <device> with <new_device>.
*/
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for resilvering to complete before returning
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
* <device> and <new_device>. In either case, <new_device> will begin life
* with a DTL of [0, now], and will immediately begin to resilver itself.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLlnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
* Restrictions: the top level of the pool must be made up only of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
+ int ms_status = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
print_vdev_tree(NULL, "dedup", config, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", config, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
- if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
- zpool_enable_datasets(zhp, mntopts, 0) != 0) {
- ret = 1;
- (void) fprintf(stderr, gettext("Split was successful, but "
- "the datasets could not all be mounted\n"));
- (void) fprintf(stderr, gettext("Try doing '%s' with a "
- "different altroot\n"), "zpool import");
+ if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
+ ms_status = zpool_enable_datasets(zhp, mntopts, 0);
+ if (ms_status == EZFS_SHAREFAILED) {
+ (void) fprintf(stderr, gettext("Split was successful, "
+ "datasets are mounted but sharing of some datasets "
+ "has failed\n"));
+ } else if (ms_status == EZFS_MOUNTFAILED) {
+ (void) fprintf(stderr, gettext("Split was successful"
+ ", but some datasets could not be mounted\n"));
+ (void) fprintf(stderr, gettext("Try doing '%s' with a "
+ "different altroot\n"), "zpool import");
+ }
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* zpool online <pool> <device> ...
*/
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "e")) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
vdev_state_t oldstate;
boolean_t avail_spare, l2cache;
nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
&l2cache, NULL);
if (tgt == NULL) {
ret = 1;
continue;
}
uint_t vsc;
oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
if ((flags & ZFS_ONLINE_EXPAND)) {
(void) printf(gettext("%s: failed "
"to expand usable space on "
"unhealthy device '%s'\n"),
(oldstate >= VDEV_STATE_DEGRADED ?
"error" : "warning"), argv[i]);
if (oldstate >= VDEV_STATE_DEGRADED) {
ret = 1;
break;
}
}
}
} else {
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool offline [-ft] <pool> <device> ...
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
*/
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "ft")) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (fault) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool clear <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
*/
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
/* check options */
while ((c = getopt(argc, argv, "FnX")) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0) {
return (1);
}
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
/*
* zpool reguid <pool>
*/
int
zpool_do_reguid(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_reguid(zhp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen [-n] <pool> ...
*
* -n Do not restart an in-progress scrub operation.
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
*/
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_reopen_one, &scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
pool_checkpoint_stat_t *pcs = NULL;
uint_t c;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return (B_FALSE);
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
return (B_TRUE);
}
return (B_FALSE);
}
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
if (err == 0 && zpool_has_checkpoint(zhp) &&
cb->cb_type == POOL_SCAN_SCRUB) {
(void) printf(gettext("warning: will not scrub state that "
"belongs to the checkpoint of pool '%s'\n"),
zpool_get_name(zhp));
}
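/* Normalize to 0 (success) or 1 (failure) for for_each_pool() */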
return (err != 0);
}
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
zpool_wait_activity_t *act = data;
return (zpool_wait(zhp, *act));
}
/*
* zpool scrub [-s | -p] [-w] [-e] <pool> ...
*
* -e Only scrub blocks in the error log.
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pause in-progress scrub.
* -w Wait. Blocks until scrub has completed.
*/
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
boolean_t is_error_scrub = B_FALSE;
boolean_t is_pause = B_FALSE;
boolean_t is_stop = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "spwe")) != -1) {
switch (c) {
case 'e':
is_error_scrub = B_TRUE;
break;
case 's':
is_stop = B_TRUE;
break;
case 'p':
is_pause = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (is_pause && is_stop) {
(void) fprintf(stderr, gettext("invalid option "
"combination :-s and -p are mutually exclusive\n"));
usage(B_FALSE);
} else {
if (is_error_scrub)
cb.cb_type = POOL_SCAN_ERRORSCRUB;
if (is_pause) {
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
} else if (is_stop) {
cb.cb_type = POOL_SCAN_NONE;
} else {
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
}
}
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, wait_callback, &act);
}
return (error);
}
/*
* zpool resilver <pool> ...
*
* Restarts any in-progress resilver
*/
int
zpool_do_resilver(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb));
}
/*
* zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
*
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
* adding a multiplier suffix such as 'k' or 'm'.
* -s Suspend. TRIM can then be restarted with no flags.
* -w Wait. Blocks until trimming has completed.
*/
int
zpool_do_trim(int argc, char **argv)
{
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"secure", no_argument, NULL, 'd'},
{"rate", required_argument, NULL, 'r'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_trim_func_t cmd_type = POOL_TRIM_START;
uint64_t rate = 0;
boolean_t secure = B_FALSE;
boolean_t wait = B_FALSE;
int c;
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
!= -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_CANCEL;
break;
case 'd':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
secure = B_TRUE;
break;
case 'r':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-r cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
(void) fprintf(stderr, "%s: %s\n",
gettext("invalid value for rate"),
libzfs_error_description(g_zfs));
usage(B_FALSE);
}
break;
case 's':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
char *poolname = argv[0];
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
trimflags_t trim_flags = {
.secure = secure,
.rate = rate,
.wait = wait,
};
nvlist_t *vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
trim_flags.fullpool = B_TRUE;
} else {
trim_flags.fullpool = B_FALSE;
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
fnvlist_free(vdevs);
zpool_close(zhp);
return (error);
}
/*
* Converts a total number of seconds to a human-readable string broken
* down into days/hours/minutes/seconds.
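* For example, 90061 seconds formats as "1 days 01:01:01".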
*/
static void
secs_to_dhms(uint64_t total, char *buf)
{
uint64_t days = total / 60 / 60 / 24;
uint64_t hours = (total / 60 / 60) % 24;
uint64_t mins = (total / 60) % 60;
uint64_t secs = (total % 60);
if (days > 0) {
(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
(u_longlong_t)days, (u_longlong_t)hours,
(u_longlong_t)mins, (u_longlong_t)secs);
} else {
(void) sprintf(buf, "%02llu:%02llu:%02llu",
(u_longlong_t)hours, (u_longlong_t)mins,
(u_longlong_t)secs);
}
}
/*
* Print out detailed error scrub status.
*/
static void
print_err_scrub_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t total_secs_left;
uint64_t secs_left, mins_left, hours_left, days_left;
uint64_t examined, to_be_examined;
if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
return;
}
(void) printf(gettext(" scrub: "));
start = ps->pss_error_scrub_start;
end = ps->pss_error_scrub_end;
pause = ps->pss_pass_error_scrub_pause;
examined = ps->pss_error_scrub_examined;
to_be_examined = ps->pss_error_scrub_to_be_examined;
assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
if (ps->pss_error_scrub_state == DSS_FINISHED) {
total_secs_left = end - start;
days_left = total_secs_left / 60 / 60 / 24;
hours_left = (total_secs_left / 60 / 60) % 24;
mins_left = (total_secs_left / 60) % 60;
secs_left = (total_secs_left % 60);
(void) printf(gettext("scrubbed %llu error blocks in %llu days "
"%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
(u_longlong_t)days_left, (u_longlong_t)hours_left,
(u_longlong_t)mins_left, (u_longlong_t)secs_left,
ctime(&end));
return;
} else if (ps->pss_error_scrub_state == DSS_CANCELED) {
(void) printf(gettext("error scrub canceled on %s"),
ctime(&end));
return;
}
assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
/* Error scrub is in progress. */
if (pause == 0) {
(void) printf(gettext("error scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("error scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\terror scrub started on %s"),
ctime(&start));
}
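/* Fraction of error blocks for which repair I/O has been issued */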
double fraction_done = (double)examined / (to_be_examined + examined);
(void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
" blocks"), 100 * fraction_done, (u_longlong_t)examined);
(void) printf("\n");
}
/*
* Print out detailed scrub status.
*/
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
uint64_t elapsed, scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
assert(is_resilver || is_scrub);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
secs_to_dhms(end - start, time_buf);
if (is_scrub) {
(void) printf(gettext("scrub repaired %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
} else if (is_resilver) {
(void) printf(gettext("resilvered %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (is_scrub) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (is_resilver) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (is_scrub) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (is_resilver) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total_s = ps->pss_to_examine;
total_i = ps->pss_to_examine - ps->pss_skipped;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total_i;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
/* do not print estimated time if we have a paused scrub */
(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
if (pause == 0 && scan_rate > 0) {
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
(void) printf(gettext(" at %s/s"), srate_buf);
}
(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
if (pause == 0 && issue_rate > 0) {
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
(void) printf(gettext(" at %s/s"), irate_buf);
}
(void) printf(gettext("\n"));
if (is_resilver) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (is_scrub) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
/*
* Only provide an estimate iff:
* 1) we haven't yet issued all we expected, and
* 2) the issue rate exceeds 10 MB/s, and
* 3) it's either:
* a) a resilver which has started repairs, or
* b) a scrub which has entered the issue phase.
*/
if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
((is_resilver && ps->pss_processed > 0) ||
(is_scrub && issued > 0))) {
secs_to_dhms((total_i - issued) / issue_rate, time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
return;
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
uint64_t bytes_issued = vrs->vrs_bytes_issued;
uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
uint64_t bytes_est_s = vrs->vrs_bytes_est;
uint64_t bytes_est_i = vrs->vrs_bytes_est;
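/* 'c' counts uint64_t words; older modules omit vrs_pass_bytes_skipped */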
if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
bytes_est_i -= vrs->vrs_pass_bytes_skipped;
uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
(vrs->vrs_pass_time_ms + 1)) * 1000;
uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
(vrs->vrs_pass_time_ms + 1)) * 1000;
double scan_pct = MIN((double)bytes_scanned * 100 /
(bytes_est_s + 1), 100);
/* Format all of the numbers we will be reporting */
char bytes_scanned_buf[7], bytes_issued_buf[7];
char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
sizeof (bytes_scanned_buf));
zfs_nicebytes(bytes_issued, bytes_issued_buf,
sizeof (bytes_issued_buf));
zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
sizeof (bytes_rebuilt_buf));
zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
time_t start = vrs->vrs_start_time;
time_t end = vrs->vrs_end_time;
/* Rebuild is finished or canceled. */
if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
(void) printf(gettext("resilvered (%s) %s in %s "
"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
(void) printf(gettext("resilver (%s) canceled on %s"),
vdev_name, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
(void) printf(gettext("resilver (%s) in progress since %s"),
vdev_name, ctime(&start));
}
assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
bytes_est_s_buf);
if (scan_rate > 0) {
zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
(void) printf(gettext(" at %s/s"), scan_rate_buf);
}
(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
bytes_est_i_buf);
if (issue_rate > 0) {
zfs_nicebytes(issue_rate, issue_rate_buf,
sizeof (issue_rate_buf));
(void) printf(gettext(" at %s/s"), issue_rate_buf);
}
(void) printf(gettext("\n"));
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
bytes_rebuilt_buf, scan_pct);
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
if (bytes_est_s >= bytes_scanned &&
scan_rate >= 10 * 1024 * 1024) {
secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
/*
* Print rebuild status for top-level vdevs.
*/
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
char *name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
print_rebuild_status_impl(vrs, i, name);
free(name);
}
}
}
/*
* As we don't scrub checkpointed blocks, we want to warn the user that we
* skipped scanning some blocks if a checkpoint exists or existed at any
* time during the scan. If a sequential reconstruction (rather than a
* healing one) was performed, the blocks were reconstructed; however, their
* checksums have not been verified, so we still print the warning.
*/
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
if (ps == NULL || pcs == NULL)
return;
if (pcs->pcs_state == CS_NONE ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
return;
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
if (ps->pss_state == DSS_NONE)
return;
if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
ps->pss_end_time < pcs->pcs_start_time)
return;
if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
(void) printf(gettext(" scan warning: skipped blocks "
"that are only referenced by the checkpoint.\n"));
} else {
assert(ps->pss_state == DSS_SCANNING);
(void) printf(gettext(" scan warning: skipping blocks "
"that are only referenced by the checkpoint.\n"));
}
}
/*
* Returns B_TRUE if there is an active rebuild in progress. Otherwise,
* B_FALSE is returned and 'rebuild_end_time' is set to the end time for
* the last completed (or canceled) rebuild.
*/
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
nvlist_t **child;
uint_t children;
boolean_t rebuilding = B_FALSE;
uint64_t end_time = 0;
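/* Track the most recent rebuild end time across all top-level vdevs */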
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
if (vrs->vrs_end_time > end_time)
end_time = vrs->vrs_end_time;
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
rebuilding = B_TRUE;
end_time = 0;
break;
}
}
}
if (rebuild_end_time != NULL)
*rebuild_end_time = end_time;
return (rebuilding);
}
/*
* Print the scan status.
*/
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
uint64_t rebuild_end_time = 0, resilver_end_time = 0;
boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
boolean_t have_errorscrub = B_FALSE;
boolean_t active_resilver = B_FALSE;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *ps = NULL;
uint_t c;
time_t scrub_start = 0, errorscrub_start = 0;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
if (ps->pss_func == POOL_SCAN_RESILVER) {
resilver_end_time = ps->pss_end_time;
active_resilver = (ps->pss_state == DSS_SCANNING);
}
have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
scrub_start = ps->pss_start_time;
if (c > offsetof(pool_scan_stat_t,
pss_pass_error_scrub_pause) / 8) {
have_errorscrub = (ps->pss_error_scrub_func ==
POOL_SCAN_ERRORSCRUB);
errorscrub_start = ps->pss_error_scrub_start;
}
}
boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
/* Always print the scrub status when available. */
if (have_scrub && scrub_start > errorscrub_start)
print_scan_scrub_resilver_status(ps);
else if (have_errorscrub && errorscrub_start >= scrub_start)
print_err_scrub_status(ps);
/*
* When there is an active resilver or rebuild print its status.
* Otherwise print the status of the last resilver or rebuild.
*/
if (active_resilver || (!active_rebuild && have_resilver &&
resilver_end_time && resilver_end_time > rebuild_end_time)) {
print_scan_scrub_resilver_status(ps);
} else if (active_rebuild || (!active_resilver && have_rebuild &&
rebuild_end_time && rebuild_end_time > resilver_end_time)) {
print_rebuild_status(zhp, nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_scan_warning(ps, pcs);
}
/*
* Print out detailed removal status.
*/
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
time_t start, end;
nvlist_t *config, *nvroot;
nvlist_t **child;
uint_t children;
char *vdev_name;
if (prs == NULL || prs->prs_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(prs->prs_removing_vdev < children);
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
/*
* Removal is finished or canceled.
*/
if (prs->prs_state == DSS_FINISHED) {
uint64_t minutes_taken = (end - start) / 60;
(void) printf(gettext("Removal of vdev %llu copied %s "
"in %lluh%um, completed on %s"),
(longlong_t)prs->prs_removing_vdev,
copied_buf,
(u_longlong_t)(minutes_taken / 60),
(uint_t)(minutes_taken % 60),
ctime((time_t *)&end));
} else if (prs->prs_state == DSS_CANCELED) {
(void) printf(gettext("Removal of %s canceled on %s"),
vdev_name, ctime(&end));
} else {
uint64_t copied, total, elapsed, mins_left, hours_left;
double fraction_done;
uint_t rate;
assert(prs->prs_state == DSS_SCANNING);
/*
* Removal is in progress.
*/
(void) printf(gettext(
"Evacuation of %s in progress since %s"),
vdev_name, ctime(&start));
copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
total = prs->prs_to_copy;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - prs->prs_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
mins_left = ((total - copied) / rate) / 60;
hours_left = mins_left / 60;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
* do not print the estimated time if it is more than
* 30 days out
*/
(void) printf(gettext(
"\t%s copied out of %s at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (hours_left < (30 * 24)) {
(void) printf(gettext(", %lluh%um to go\n"),
(u_longlong_t)hours_left, (uint_t)(mins_left % 60));
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vdev_name);
if (prs->prs_mapping_memory > 0) {
char mem_buf[7];
zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
(void) printf(gettext(
"\t%s memory used for removed device mappings\n"),
mem_buf);
}
}
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
time_t start;
char space_buf[7];
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return;
(void) printf(gettext("checkpoint: "));
start = pcs->pcs_start_time;
zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
char *date = ctime(&start);
/*
* ctime() adds a newline at the end of the generated
* string, thus the weird format specifier and the
* strlen() call used to chop it off from the output.
*/
(void) printf(gettext("created %.*s, consumes %s\n"),
(int)(strlen(date) - 1), date, space_buf);
return;
}
assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
(void) printf(gettext("discarding, %s remaining.\n"),
space_buf);
}
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
free(name);
}
}
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2,
B_FALSE, NULL);
free(name);
}
}
static void
print_dedup_stats(nvlist_t *config)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
char dspace[6], mspace[6];
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if there is anything in the dedup
* table, continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
/*
* Display a summary of pool status, such as:
*
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config and any
* error logs. If the '-x' option is specified, only pools with potential
* problems are displayed.
*/
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
const char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
printf(" ");
printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
fputc(' ', stdout);
printf_color(ANSI_BOLD, gettext("state: "));
printf_color(health_str_to_color(health), "%s", health);
fputc('\n', stdout);
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_FAILING_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
(void) printf(gettext("status: One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
(void) printf(gettext("action: Replace affected devices with "
"devices that support the\n\tconfigured block size, or "
"migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted datasets "
"contain an on-disk incompatibility\n\twhich "
"needs to be corrected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted snapshots "
"and bookmarks contain an on-disk\n\tincompat"
"ibility. This may cause on-disk corruption if "
"they are used\n\twith 'zfs recv'.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
"the encrypted snapshots and bookmarks is required,"
" use\n\ta non-raw send to backup and restore them."
" Alternately, they may be\n\tremoved to resolve "
"the incompatibility.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors cannot actually be generated yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
pool_checkpoint_stat_t *pcs = NULL;
pool_removal_stat_t *prs = NULL;
print_scan_status(zhp, nvroot);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
print_removal_status(zhp, prs);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_status(pcs);
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
color_start(ANSI_BOLD);
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
color_end();
if (cbp->cb_print_slow_ios) {
printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE, NULL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
(void) printf("\n");
if (nerr == 0) {
(void) printf(gettext(
"errors: No known data errors\n"));
} else if (!cbp->cb_verbose) {
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
} else {
print_error_log(zhp);
}
}
if (cbp->cb_dedup_stats)
print_dedup_stats(config);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
/*
* zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -i Display vdev initialization status.
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -v Display complete error logs.
* -x Display only pools with potential problems.
* -D Display dedup status (undocumented).
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format.
*
* Describes the health status of all pools or some subset.
*/
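/*
* For example, "zpool status -x" reports only pools with potential
* problems, and "zpool status tank 5" redisplays the status of 'tank'
* every five seconds until interrupted.
*/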
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
/* check options */
while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
case 'i':
cb.cb_print_vdev_init = B_TRUE;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 's':
cb.cb_print_slow_ios = B_TRUE;
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case 'D':
cb.cb_dedup_stats = B_TRUE;
break;
case 't':
cb.cb_print_vdev_trim = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
for (;;) {
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
cb.cb_literal, status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
(void) printf(gettext("all pools are healthy\n"));
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
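/*
* Count filesystems whose ZPL version is newer than this implementation
* supports; such filesystems prevent the pool version upgrade. Recurses
* into child filesystems and closes each handle it is passed.
*/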
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
(void) fprintf(stderr, gettext("Upgrade not performed because "
"'compatibility' property set to '"
ZPOOL_COMPAT_LEGACY "'.\n"));
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
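/*
* Enable every supported feature that the pool's 'compatibility'
* property allows and that is not already enabled. The number of newly
* enabled features is returned through *countp.
*/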
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t modified_pool = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
modified_pool = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
modified_pool = B_TRUE;
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"
"Note that setting a pool's 'compatibility' "
"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
"inhibit upgrades.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "
"used to inhibit\nfeature "
"upgrades.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t modified_pool = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
modified_pool = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
modified_pool = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported and requested features enabled.\n"),
zpool_get_name(zhp));
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display downrev'd ZFS pools available for upgrade.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
*/
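/*
* For example, "zpool upgrade -v" lists the supported feature flags and
* legacy versions, and "zpool upgrade -a" upgrades every imported pool,
* subject to each pool's 'compatibility' property.
*/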
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf("%s", gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
if (!fi->fi_zfs_mod_supported)
continue;
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported and "
"requested features enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported and requested features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, upgrade_one, &cb);
}
return (ret);
}
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
nvlist_t **records;
uint_t numrecords;
int i;
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[64] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
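/* e.g. "2024-06-30.21:49:08" (%F is the ISO date, %T the time) */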
}
if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
ZPOOL_HIST_ELAPSED_NS);
(void) snprintf(tbuf + strlen(tbuf),
sizeof (tbuf) - strlen(tbuf),
" (%lldms)", (long long)elapsed_ns / 1000 / 1000);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
(void) printf(" output nvlist omitted; "
"original size: %lldKB\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_OUTPUT_SIZE) / 1024);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
}
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
int ret;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
uint64_t off = 0;
boolean_t eof = B_FALSE;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
while (!eof) {
if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
return (ret);
print_history_records(nvhis, cb);
nvlist_free(nvhis);
}
(void) printf("\n");
return (ret);
}
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
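/*
* For example, "zpool history -il tank" also shows internally logged
* events (-i) and the user, host, and zone that issued each command (-l).
*/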
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, get_history_one, &cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32];
const char *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
const char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
const char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_DONTCARE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
const char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
static int
zpool_do_events_clear(void)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays event logs generated by ZFS.
*/
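/*
* For example, "zpool events -f tank" follows new events for the pool
* 'tank' as they are posted, and "zpool events -c" clears the event log.
*/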
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear();
else
ret = zpool_do_events_next(&opts);
return (ret);
}
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
pl = pl->pl_next) {
char *prop_name;
/*
* If the first property is pool name, it is a special
* placeholder that we can skip. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL) {
prop_name = pl->pl_user_prop;
} else {
prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
}
if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
prop_name, value, sizeof (value), &srctype,
cbp->cb_literal) == 0) {
zprop_print_one_property(vdevname, cbp, prop_name,
value, srctype, NULL, NULL);
}
}
return (0);
}
static int
get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
{
zpool_handle_t *zhp = zhp_data;
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char *vdevname;
const char *type;
int ret;
/*
* zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
* pool name for display purposes, which is not desired. Fall back to
* zpool_vdev_name() when not dealing with the root vdev.
*/
type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
vdevname = strdup("root-0");
else
vdevname = zpool_vdev_name(g_zfs, zhp, nv,
cbp->cb_vdevs.cb_name_flags);
(void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
ret = get_callback_vdev(zhp, vdevname, data);
free(vdevname);
return (ret);
}
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
zprop_list_t *pl;
int vid;
if (cbp->cb_type == ZFS_TYPE_VDEV) {
if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
for_each_vdev(zhp, get_callback_vdev_cb, data);
} else {
/* Adjust column widths for vdev properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
vdev_expand_proplist(zhp,
cbp->cb_vdevs.cb_names[vid],
&cbp->cb_proplist);
}
/* Display the properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
get_callback_vdev(zhp,
cbp->cb_vdevs.cb_names[vid], data);
}
}
} else {
assert(cbp->cb_type == ZFS_TYPE_POOL);
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also
* skip over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
zfs_prop_user(pl->pl_user_prop)) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_get_userprop(zhp, pl->pl_user_prop,
value, sizeof (value), &srctype) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp),
cbp, pl->pl_user_prop, value, srctype,
NULL, NULL);
} else if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp,
pl->pl_user_prop, value,
sizeof (value)) == 0) {
zprop_print_one_property(
zpool_get_name(zhp), cbp,
pl->pl_user_prop, value, srctype,
NULL, NULL);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype,
cbp->cb_literal) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp),
cbp, zpool_prop_to_name(pl->pl_prop),
value, srctype, NULL, NULL);
}
}
}
return (0);
}
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
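/*
* For example, "zpool get -Hp size,capacity tank" prints exact values in
* a script-friendly form, and "zpool get all tank root" reports the
* properties of the root vdev (handled internally as "root-0").
*/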
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *propstr = NULL;
char *vdev = NULL;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
current_prop_type = cb.cb_type;
/* check options */
while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'o':
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] =
{ "name", "property", "value", "source",
"all" };
static const zfs_get_column_t col_cols[] =
{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
GET_COL_SOURCE };
if (i == ZFS_GET_NCOLS - 1) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
if (strcmp(tok, col_opts[c]) == 0)
goto found;
(void) fprintf(stderr,
gettext("invalid column name '%s'\n"), tok);
usage(B_FALSE);
found:
if (c >= 4) {
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
memcpy(cb.cb_columns, col_cols,
sizeof (col_cols));
i = ZFS_GET_NCOLS - 1;
} else
cb.cb_columns[i++] = col_cols[c];
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
/* Properties list is needed later by zprop_get_list() */
propstr = argv[0];
argc--;
argv++;
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
(argc == 2 && strcmp(argv[1], "root") == 0) ||
are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);
/* ... and the rest are vdev names */
cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = argc - 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected a list of vdevs in"
" \"%s\", but got:\n"), argv[0]);
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The first arg isn't a pool name.
*/
fprintf(stderr, gettext("missing pool name.\n"));
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
cb.cb_type) != 0) {
/* Use correct list of valid properties (pool or vdev) */
current_prop_type = cb.cb_type;
usage(B_FALSE);
}
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
cb.cb_literal, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
if (vdev != NULL)
free(vdev);
return (ret);
}
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
boolean_t cb_any_successful;
} set_cbdata_t;
static int
set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
{
int error;
/* Check if we have out-of-bounds features */
if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(cb->cb_value, features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
nvlist_t *enabled = zpool_get_features(zhp);
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
if (nvlist_exists(enabled, fguid) && !features[i])
break;
}
if (i < SPA_FEATURES)
(void) fprintf(stderr, gettext("Warning: one or "
"more features already enabled on pool '%s'\n"
"are not present in this compatibility set.\n"),
zpool_get_name(zhp));
}
/* if we're setting a feature, check it's in compatibility set */
if (zpool_prop_feature(cb->cb_propname) &&
strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
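/* cb_propname has the form "feature@<name>", e.g. "feature@async_destroy" */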
char *fname = strchr(cb->cb_propname, '@') + 1;
spa_feature_t f;
if (zfeature_lookup_name(fname, &f) == 0) {
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(compat, features) !=
ZPOOL_COMPATIBILITY_OK) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"because the pool's 'compatibility' "
"property cannot be parsed.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
if (!features[f]) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"as it is not specified in this pool's "
"current compatibility set.\n"
"Consider setting 'compatibility' to a "
"less restrictive set, or to 'off'.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
}
}
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
return (error);
}
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
if (cb->cb_type == ZFS_TYPE_VDEV) {
error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
cb->cb_propname, cb->cb_value);
} else {
assert(cb->cb_type == ZFS_TYPE_POOL);
error = set_pool_callback(zhp, cb);
}
cb->cb_any_successful = !error;
return (error);
}
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
char *vdev = NULL;
current_prop_type = ZFS_TYPE_POOL;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 4) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
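/* e.g. "autoexpand=on" now reads as propname "autoexpand", value "on" */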
argc -= 2;
argv += 2;
/* argv[0] is pool name */
if (!is_pool(argv[0])) {
(void) fprintf(stderr,
gettext("cannot open '%s': is not a pool\n"), argv[0]);
return (EINVAL);
}
/* argv[1], when supplied, is vdev name */
if (argc == 2) {
if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);
if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
(void) fprintf(stderr, gettext(
"cannot find '%s' in '%s': device not in pool\n"),
vdev, argv[0]);
free(vdev);
return (EINVAL);
}
cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = 1;
cb.cb_type = ZFS_TYPE_VDEV;
}
error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, set_callback, &cb);
if (vdev != NULL)
free(vdev);
return (error);
}
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
uint64_t bytes_remaining;
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
assert(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (activity == ZPOOL_WAIT_INITIALIZE &&
vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
bytes_remaining = vs->vs_initialize_bytes_est -
vs->vs_initialize_bytes_done;
else if (activity == ZPOOL_WAIT_TRIM &&
vs->vs_trim_state == VDEV_TRIM_ACTIVE)
bytes_remaining = vs->vs_trim_bytes_est -
vs->vs_trim_bytes_done;
else
bytes_remaining = 0;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++)
bytes_remaining += vdev_activity_remaining(child[c], activity);
return (bytes_remaining);
}
/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
uint64_t bytes_remaining = 0;
nvlist_t **child;
uint_t children;
int error;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
error = nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
if (error == 0) {
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
bytes_remaining += (vrs->vrs_bytes_est -
vrs->vrs_bytes_rebuilt);
}
}
}
return (bytes_remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
const char *vdev_type;
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++) {
if (vdev_any_spare_replacing(child[c]))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct wait_data {
char *wd_poolname;
boolean_t wd_scripted;
boolean_t wd_exact;
boolean_t wd_headers_once;
boolean_t wd_should_exit;
/* Which activities to wait for */
boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
float wd_interval;
pthread_cond_t wd_cv;
pthread_mutex_t wd_mutex;
} wait_data_t;
/*
* Print to stdout a single line containing one column for each activity
* we are waiting for, specifying how many bytes of work remain for that
* activity.
*/
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
nvlist_t *config, *nvroot;
uint_t c;
int i;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *pss = NULL;
pool_removal_stat_t *prs = NULL;
const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"};
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
/* Calculate the width of each column */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
/*
* Make sure we have enough space in the col for pretty-printed
* numbers and for the column header, and then leave a couple
* spaces between cols for readability.
*/
col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
}
/* Print header if appropriate */
int term_height = terminal_height();
boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
row % (term_height-1) == 0);
if (!wd->wd_scripted && (row == 0 || reprint_header)) {
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
if (wd->wd_enabled[i])
(void) printf("%*s", col_widths[i], headers[i]);
}
(void) fputc('\n', stdout);
}
/* Bytes of work remaining in each activity */
int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
bytes_rem[ZPOOL_WAIT_FREE] =
zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
if (prs != NULL && prs->prs_state == DSS_SCANNING)
bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
prs->prs_copied;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
if (pss != NULL && pss->pss_state == DSS_SCANNING &&
pss->pss_pass_scrub_pause == 0) {
int64_t rem = pss->pss_to_examine - pss->pss_issued;
if (pss->pss_func == POOL_SCAN_SCRUB)
bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
else
bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
} else if (check_rebuilding(nvroot, NULL)) {
bytes_rem[ZPOOL_WAIT_RESILVER] =
vdev_activity_top_remaining(nvroot);
}
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
bytes_rem[ZPOOL_WAIT_TRIM] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
/*
* A replace finishes after resilvering finishes, so the amount of work
* left for a replace is the same as for resilvering.
*
* It isn't quite correct to assume, as we do here, that if we have any
* 'spare' or 'replacing' vdevs and a resilver is happening, then a
* replace is in progress. When a hot spare is used, the faulted vdev is
* not removed after the hot spare is resilvered, so the parent 'spare'
* vdev is not removed either. Thus we could have a 'spare' vdev while
* resilvering for a different reason. We use it as a heuristic anyway,
* because we don't have access to the DTLs, which could tell us whether
* or not we have really finished resilvering a hot spare.
*/
if (vdev_any_spare_replacing(nvroot))
bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
char buf[64];
if (!wd->wd_enabled[i])
continue;
if (wd->wd_exact)
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
bytes_rem[i]);
else
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
if (wd->wd_scripted)
(void) printf(i == 0 ? "%s" : "\t%s", buf);
else
(void) printf(" %*s", col_widths[i] - 1, buf);
}
(void) printf("\n");
(void) fflush(stdout);
}
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
int ret = 0;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
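/*
 * Worked example (illustrative values): with wd_interval = 2.5 and
 * tv_nsec = 900000000 after clock_gettime(), we add 2 whole seconds,
 * then nanos = 900000000 + 0.5 * NANOSEC = 1400000000 >= NANOSEC, so
 * one more second is carried and tv_nsec becomes 400000000.
 */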
pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
(void) fprintf(stderr, gettext("pthread_cond_timedwait "
"failed: %s\n"), strerror(ret));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
int c, i;
unsigned long count;
pthread_t status_thr;
int error = 0;
zpool_handle_t *zhp;
wait_data_t wd;
wd.wd_scripted = B_FALSE;
wd.wd_exact = B_FALSE;
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
pthread_mutex_init(&wd.wd_mutex, NULL);
pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
wd.wd_enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
switch (c) {
case 'H':
wd.wd_scripted = B_TRUE;
break;
case 'n':
wd.wd_headers_once = B_TRUE;
break;
case 'p':
wd.wd_exact = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 't':
/* Reset activities array */
memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] = {
"discard", "free", "initialize", "replace",
"remove", "resilver", "scrub", "trim" };
for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
if (strcmp(tok, col_opts[i]) == 0) {
wd.wd_enabled[i] = B_TRUE;
goto found;
}
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"), tok);
usage(B_FALSE);
found:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &wd.wd_interval, &count);
if (count != 0) {
/* This subcmd only accepts an interval, not a count */
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (wd.wd_interval != 0)
verbose = B_TRUE;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
wd.wd_poolname = argv[0];
if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
return (1);
if (verbose) {
/*
* We use a separate thread for printing status updates because
* the main thread will call lzc_wait(), which blocks as long
* as an activity is in progress, which can be a long time.
*/
if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
!= 0) {
(void) fprintf(stderr, gettext("failed to create status"
"thread: %s\n"), strerror(errno));
zpool_close(zhp);
return (1);
}
}
/*
* Loop over all activities that we are supposed to wait for until none
* of them are in progress. Note that this means we can end up waiting
* for more activities to complete than just those that were in progress
* when we began waiting; if an activity we are interested in begins
* while we are waiting for another activity, we will wait for both to
* complete before exiting.
*/
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!wd.wd_enabled[i])
continue;
error = zpool_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zpool_close(zhp);
if (verbose) {
uintptr_t status;
pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
pthread_cond_signal(&wd.wd_cv);
pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
pthread_mutex_destroy(&wd.wd_mutex);
pthread_cond_destroy(&wd.wd_cv);
return (error);
}
static int
find_command_idx(const char *command, int *idx)
{
for (int i = 0; i < NCOMMAND; ++i) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
/*
* Display version message
*/
static int
zpool_do_version(int argc, char **argv)
{
(void) argc, (void) argv;
return (zfs_version_print() != 0);
}
/*
* Do zpool_load_compat() and print error message on failure
*/
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
char report[1024];
zpool_compat_status_t ret;
ret = zpool_load_compat(compat, list, report, sizeof (report));
switch (ret) {
case ZPOOL_COMPATIBILITY_OK:
break;
case ZPOOL_COMPATIBILITY_NOFILES:
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
(void) fprintf(stderr, "Error: %s\n", report);
break;
case ZPOOL_COMPATIBILITY_WARNTOKEN:
(void) fprintf(stderr, "Warning: %s\n", report);
ret = ZPOOL_COMPATIBILITY_OK;
break;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zpool_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify their input strings while parsing them.
* We operate on a copy to protect the original argv.
*/
newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/sys/contrib/openzfs/cmd/ztest.c b/sys/contrib/openzfs/cmd/ztest.c
index b6b99bfff6db..398c519cfc35 100644
--- a/sys/contrib/openzfs/cmd/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest.c
@@ -1,8324 +1,8324 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
*/
/*
* The objective of this program is to provide a DMU/ZAP/SPA stress test
* that runs entirely in userland, is easy to use, and easy to extend.
*
* The overall design of the ztest program is as follows:
*
* (1) For each major functional area (e.g. adding vdevs to a pool,
* creating and destroying datasets, reading and writing objects, etc)
* we have a simple routine to test that functionality. These
* individual routines do not have to do anything "stressful".
*
* (2) We turn these simple functionality tests into a stress test by
* running them all in parallel, with as many threads as desired,
* and spread across as many datasets, objects, and vdevs as desired.
*
* (3) While all this is happening, we inject faults into the pool to
* verify that self-healing data really works.
*
* (4) Every time we open a dataset, we change its checksum and compression
* functions. Thus even individual objects vary from block to block
* in which checksum they use and whether they're compressed.
*
* (5) To verify that we never lose on-disk consistency after a crash,
* we run the entire test in a child of the main process.
* At random times, the child self-immolates with a SIGKILL.
* This is the software equivalent of pulling the power cord.
* The parent then runs the test again, using the existing
* storage pool, as many times as desired. If backwards compatibility
* testing is enabled ztest will sometimes run the "older" version
* of ztest after a SIGKILL.
*
* (6) To verify that we don't have future leaks or temporal incursions,
* many of the functional tests record the transaction group number
* as part of their data. When reading old data, they verify that
* the transaction group number is less than the current, open txg.
* If you add a new test, please do this if applicable.
*
* (7) Threads are created with a reduced stack size, for sanity checking.
* Therefore, it's important not to allocate huge buffers on the stack.
*
* When run with no arguments, ztest runs for about five minutes and
* produces no output if successful. To get a little bit of information,
* specify -V. To get more information, specify -VV, and so on.
*
* To turn this into an overnight stress test, use -T to specify run time.
*
* You can ask more vdevs [-v], datasets [-d], or threads [-t]
* to increase the pool capacity, fanout, and overall stress level.
*
* Use the -k option to set the desired frequency of kills.
*
* When ztest invokes itself it passes all relevant information through a
* temporary file which is mmap-ed in the child process. This allows shared
* memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
* stored at offset 0 of this file and contains information on the size and
* number of shared structures in the file. The information stored in this file
* must remain backwards compatible with older versions of ztest so that
* ztest can invoke them during backwards compatibility testing (-B).
*/
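/*
 * Example invocations (run lengths and counts are illustrative, not
 * recommendations):
 *
 *	ztest			five minutes with the defaults
 *	ztest -VV		the same, with more progress output
 *	ztest -T 28800 -V	an overnight (eight hour) stress run
 *	ztest -v 8 -d 16 -t 64	more vdevs, datasets, and threads
 *
 * These use only the options described above; see usage() for the full
 * option table.
 */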
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_trim.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <sys/blake3.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <signal.h>
#include <umem.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <zfs_fletcher.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <sys/crypto/icp.h>
#include <sys/zfs_impl.h>
#if (__GLIBC__ && !__UCLIBC__)
#include <execinfo.h> /* for backtrace() */
#endif
static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;
typedef struct ztest_shared_hdr {
uint64_t zh_hdr_size;
uint64_t zh_opts_size;
uint64_t zh_size;
uint64_t zh_stats_size;
uint64_t zh_stats_count;
uint64_t zh_ds_size;
uint64_t zh_ds_count;
} ztest_shared_hdr_t;
static ztest_shared_hdr_t *ztest_shared_hdr;
enum ztest_class_state {
ZTEST_VDEV_CLASS_OFF,
ZTEST_VDEV_CLASS_ON,
ZTEST_VDEV_CLASS_RND
};
#define ZO_GVARS_MAX_ARGLEN ((size_t)64)
#define ZO_GVARS_MAX_COUNT ((size_t)10)
typedef struct ztest_shared_opts {
char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
char zo_alt_ztest[MAXNAMELEN];
char zo_alt_libpath[MAXNAMELEN];
uint64_t zo_vdevs;
uint64_t zo_vdevtime;
size_t zo_vdev_size;
int zo_ashift;
int zo_mirrors;
int zo_raid_children;
int zo_raid_parity;
char zo_raid_type[8];
int zo_draid_data;
int zo_draid_spares;
int zo_datasets;
int zo_threads;
uint64_t zo_passtime;
uint64_t zo_killrate;
int zo_verbose;
int zo_init;
uint64_t zo_time;
uint64_t zo_maxloops;
uint64_t zo_metaslab_force_ganging;
int zo_mmp_test;
int zo_special_vdevs;
int zo_dump_dbgmsg;
int zo_gvars_count;
char zo_gvars[ZO_GVARS_MAX_COUNT][ZO_GVARS_MAX_ARGLEN];
} ztest_shared_opts_t;
/* Default values for command line options. */
#define DEFAULT_POOL "ztest"
#define DEFAULT_VDEV_DIR "/tmp"
#define DEFAULT_VDEV_COUNT 5
#define DEFAULT_VDEV_SIZE (SPA_MINDEVSIZE * 4) /* 256m default size */
#define DEFAULT_VDEV_SIZE_STR "256M"
#define DEFAULT_ASHIFT SPA_MINBLOCKSHIFT
#define DEFAULT_MIRRORS 2
#define DEFAULT_RAID_CHILDREN 4
#define DEFAULT_RAID_PARITY 1
#define DEFAULT_DRAID_DATA 4
#define DEFAULT_DRAID_SPARES 1
#define DEFAULT_DATASETS_COUNT 7
#define DEFAULT_THREADS 23
#define DEFAULT_RUN_TIME 300 /* 300 seconds */
#define DEFAULT_RUN_TIME_STR "300 sec"
#define DEFAULT_PASS_TIME 60 /* 60 seconds */
#define DEFAULT_PASS_TIME_STR "60 sec"
#define DEFAULT_KILL_RATE 70 /* 70% kill rate */
#define DEFAULT_KILLRATE_STR "70%"
#define DEFAULT_INITS 1
#define DEFAULT_MAX_LOOPS 50 /* 5 minutes */
#define DEFAULT_FORCE_GANGING (64 << 10)
#define DEFAULT_FORCE_GANGING_STR "64K"
/* Simplifying assumption: -1 is not a valid default. */
#define NO_DEFAULT -1
static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_pool = DEFAULT_POOL,
.zo_dir = DEFAULT_VDEV_DIR,
.zo_alt_ztest = { '\0' },
.zo_alt_libpath = { '\0' },
.zo_vdevs = DEFAULT_VDEV_COUNT,
.zo_ashift = DEFAULT_ASHIFT,
.zo_mirrors = DEFAULT_MIRRORS,
.zo_raid_children = DEFAULT_RAID_CHILDREN,
.zo_raid_parity = DEFAULT_RAID_PARITY,
.zo_raid_type = VDEV_TYPE_RAIDZ,
.zo_vdev_size = DEFAULT_VDEV_SIZE,
.zo_draid_data = DEFAULT_DRAID_DATA, /* data drives */
.zo_draid_spares = DEFAULT_DRAID_SPARES, /* distributed spares */
.zo_datasets = DEFAULT_DATASETS_COUNT,
.zo_threads = DEFAULT_THREADS,
.zo_passtime = DEFAULT_PASS_TIME,
.zo_killrate = DEFAULT_KILL_RATE,
.zo_verbose = 0,
.zo_mmp_test = 0,
.zo_init = DEFAULT_INITS,
.zo_time = DEFAULT_RUN_TIME,
.zo_maxloops = DEFAULT_MAX_LOOPS, /* max loops during spa_freeze() */
.zo_metaslab_force_ganging = DEFAULT_FORCE_GANGING,
.zo_special_vdevs = ZTEST_VDEV_CLASS_RND,
.zo_gvars_count = 0,
};
extern uint64_t metaslab_force_ganging;
extern uint64_t metaslab_df_alloc_threshold;
extern uint64_t zfs_deadman_synctime_ms;
extern uint_t metaslab_preload_limit;
extern int zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern uint_t dmu_object_alloc_chunk_shift;
extern boolean_t zfs_force_some_double_word_sm_entries;
extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
static const char *const ztest_wkeydata = "abcdefghijklmnopqrstuvwxyz012345";
typedef struct ztest_shared_ds {
uint64_t zd_seq;
} ztest_shared_ds_t;
static ztest_shared_ds_t *ztest_shared_ds;
#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
#define BT_MAGIC 0x123456789abcdefULL
#define MAXFAULTS(zs) \
(MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raid_parity + 1) - 1)
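/*
 * Worked example: with the defaults below (2-way mirrors, single
 * parity), MAXFAULTS = MAX(2, 1) * (1 + 1) - 1 = 3, i.e. the most
 * faults the test may inject while still expecting the pool to heal.
 */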
enum ztest_io_type {
ZTEST_IO_WRITE_TAG,
ZTEST_IO_WRITE_PATTERN,
ZTEST_IO_WRITE_ZEROES,
ZTEST_IO_TRUNCATE,
ZTEST_IO_SETATTR,
ZTEST_IO_REWRITE,
ZTEST_IO_TYPES
};
typedef struct ztest_block_tag {
uint64_t bt_magic;
uint64_t bt_objset;
uint64_t bt_object;
uint64_t bt_dnodesize;
uint64_t bt_offset;
uint64_t bt_gen;
uint64_t bt_txg;
uint64_t bt_crtxg;
} ztest_block_tag_t;
typedef struct bufwad {
uint64_t bw_index;
uint64_t bw_txg;
uint64_t bw_data;
} bufwad_t;
/*
* It would be better to use a rangelock_t per object. Unfortunately
* the rangelock_t is not a drop-in replacement for rl_t, because we
* still need to map from object ID to rangelock_t.
*/
typedef enum {
RL_READER,
RL_WRITER,
RL_APPEND
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
kmutex_t rll_lock;
kcondvar_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t rl_object;
uint64_t rl_offset;
uint64_t rl_size;
rll_t *rl_lock;
} rl_t;
#define ZTEST_RANGE_LOCKS 64
#define ZTEST_OBJECT_LOCKS 64
/*
* Object descriptor. Used as a template for object lookup/create/remove.
*/
typedef struct ztest_od {
uint64_t od_dir;
uint64_t od_object;
dmu_object_type_t od_type;
dmu_object_type_t od_crtype;
uint64_t od_blocksize;
uint64_t od_crblocksize;
uint64_t od_crdnodesize;
uint64_t od_gen;
uint64_t od_crgen;
char od_name[ZFS_MAX_DATASET_NAME_LEN];
} ztest_od_t;
/*
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
pthread_rwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
* Per-iteration state.
*/
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
typedef struct ztest_info {
ztest_func_t *zi_func; /* test function */
uint64_t zi_iters; /* iterations per execution */
uint64_t *zi_interval; /* execute every <interval> seconds */
const char *zi_funcname; /* name of test function */
} ztest_info_t;
typedef struct ztest_shared_callstate {
uint64_t zc_count; /* per-pass count */
uint64_t zc_time; /* per-pass time */
uint64_t zc_next; /* next time to call this function */
} ztest_shared_callstate_t;
static ztest_shared_callstate_t *ztest_shared_callstate;
#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_object_next_chunk;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_mmp_enable_disable;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_class_add;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
ztest_func_t ztest_device_removal;
ztest_func_t ztest_spa_checkpoint_create_discard;
ztest_func_t ztest_initialize;
ztest_func_t ztest_trim;
ztest_func_t ztest_blake3;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;
static uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
static uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
static uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
static uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
static uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
#define ZTI_INIT(func, iters, interval) \
{ .zi_func = (func), \
.zi_iters = (iters), \
.zi_interval = (interval), \
.zi_funcname = # func }
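/*
 * Expansion sketch: ZTI_INIT(ztest_zap, 30, &zopt_always) yields
 * { .zi_func = ztest_zap, .zi_iters = 30, .zi_interval = &zopt_always,
 * .zi_funcname = "ztest_zap" }, the stringizing '#' operator providing
 * the function name.
 */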
static ztest_info_t ztest_info[] = {
ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always),
ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always),
ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always),
ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always),
ZTI_INIT(ztest_zap, 30, &zopt_always),
ZTI_INIT(ztest_zap_parallel, 100, &zopt_always),
ZTI_INIT(ztest_split_pool, 1, &zopt_sometimes),
ZTI_INIT(ztest_zil_commit, 1, &zopt_incessant),
ZTI_INIT(ztest_zil_remount, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_read_write_zcopy, 1, &zopt_often),
ZTI_INIT(ztest_dmu_objset_create_destroy, 1, &zopt_often),
ZTI_INIT(ztest_dsl_prop_get_set, 1, &zopt_often),
ZTI_INIT(ztest_spa_prop_get_set, 1, &zopt_sometimes),
#if 0
ZTI_INIT(ztest_dmu_prealloc, 1, &zopt_sometimes),
#endif
ZTI_INIT(ztest_fzap, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_fault_inject, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_hold, 1, &zopt_sometimes),
ZTI_INIT(ztest_mmp_enable_disable, 1, &zopt_sometimes),
ZTI_INIT(ztest_reguid, 1, &zopt_rarely),
ZTI_INIT(ztest_scrub, 1, &zopt_rarely),
ZTI_INIT(ztest_spa_upgrade, 1, &zopt_rarely),
ZTI_INIT(ztest_dsl_dataset_promote_busy, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_attach_detach, 1, &zopt_sometimes),
ZTI_INIT(ztest_vdev_LUN_growth, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_class_add, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_device_removal, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_checkpoint_create_discard, 1, &zopt_rarely),
ZTI_INIT(ztest_initialize, 1, &zopt_sometimes),
ZTI_INIT(ztest_trim, 1, &zopt_sometimes),
ZTI_INIT(ztest_blake3, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
};
#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
kmutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
boolean_t zs_do_init;
hrtime_t zs_proc_start;
hrtime_t zs_proc_stop;
hrtime_t zs_thread_start;
hrtime_t zs_thread_stop;
hrtime_t zs_thread_kill;
uint64_t zs_enospc_count;
uint64_t zs_vdev_next_leaf;
uint64_t zs_vdev_aux;
uint64_t zs_alloc;
uint64_t zs_space;
uint64_t zs_splits;
uint64_t zs_mirrors;
uint64_t zs_metaslab_sz;
uint64_t zs_metaslab_df_alloc_threshold;
uint64_t zs_guid;
} ztest_shared_t;
#define ID_PARALLEL -1ULL
static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
static ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
static kmutex_t ztest_vdev_lock;
static boolean_t ztest_device_removal_active = B_FALSE;
static boolean_t ztest_pool_scrubbed = B_FALSE;
static kmutex_t ztest_checkpoint_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
static pthread_rwlock_t ztest_name_lock;
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;
/* Global commit callback list */
static ztest_cb_list_t zcl;
/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;
/*
* Minimum number of commit callbacks that need to be registered for us to check
* whether the minimum txg delay is acceptable.
*/
#define ZTEST_COMMIT_CB_MIN_REG 100
/*
* If a number of txgs equal to this threshold have been created after a commit
* callback has been registered but not called, then we assume there is an
* implementation bug.
*/
#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
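/*
 * With TXG_CONCURRENT_STATES == 3 (open, quiescing, syncing), this
 * works out to a threshold of 1003 txgs.
 */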
enum ztest_object {
ZTEST_META_DNODE = 0,
ZTEST_DIROBJ,
ZTEST_OBJECTS
};
static __attribute__((noreturn)) void usage(boolean_t requested);
static int ztest_scrub_impl(spa_t *spa);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
dump_debug_buffer(void)
{
ssize_t ret __attribute__((unused));
if (!ztest_opts.zo_dump_dbgmsg)
return;
/*
* We use write() instead of printf() so that this function
* is safe to call from a signal handler.
*/
ret = write(STDOUT_FILENO, "\n", 1);
zfs_dbgmsg_print("ztest");
}
#define BACKTRACE_SZ 100
static void
sig_handler(int signo)
{
struct sigaction action;
#if (__GLIBC__ && !__UCLIBC__) /* backtrace() is a GNU extension */
int nptrs;
void *buffer[BACKTRACE_SZ];
nptrs = backtrace(buffer, BACKTRACE_SZ);
backtrace_symbols_fd(buffer, nptrs, STDERR_FILENO);
#endif
dump_debug_buffer();
/*
* Restore default action and re-raise signal so SIGSEGV and
* SIGABRT can trigger a core dump.
*/
action.sa_handler = SIG_DFL;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
(void) sigaction(signo, &action, NULL);
raise(signo);
}
#define FATAL_MSG_SZ 1024
static const char *fatal_msg;
static __attribute__((format(printf, 2, 3))) __attribute__((noreturn)) void
fatal(int do_perror, const char *message, ...)
{
va_list args;
int save_errno = errno;
char *buf;
(void) fflush(stdout);
buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
if (buf == NULL)
goto out;
va_start(args, message);
(void) sprintf(buf, "ztest: ");
/* LINTED */
(void) vsprintf(buf + strlen(buf), message, args);
va_end(args);
if (do_perror) {
(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
": %s", strerror(save_errno));
}
(void) fprintf(stderr, "%s\n", buf);
fatal_msg = buf; /* to ease debugging */
out:
if (ztest_dump_core)
abort();
else
dump_debug_buffer();
exit(3);
}
static int
str2shift(const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
buf);
usage(B_FALSE);
}
if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
return (10*i);
}
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
usage(B_FALSE);
}
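/*
 * str2shift() maps a size suffix to a power-of-two shift: "" -> 0,
 * "K"/"KB" -> 10, "M"/"MB" -> 20, and so on, so nicenumtoull("64K")
 * below evaluates to 64 << 10 = 65536.
 */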
static uint64_t
nicenumtoull(const char *buf)
{
char *end;
uint64_t val;
val = strtoull(buf, &end, 0);
if (end == buf) {
(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
usage(B_FALSE);
} else if (end[0] == '.') {
double fval = strtod(buf, &end);
fval *= pow(2, str2shift(end));
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val = (uint64_t)fval;
} else {
int shift = str2shift(end);
if (shift >= 64 || (val << shift) >> shift != val) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val <<= shift;
}
return (val);
}
typedef struct ztest_option {
const char short_opt;
const char *long_opt;
const char *long_opt_param;
const char *comment;
unsigned int default_int;
const char *default_str;
} ztest_option_t;
/*
* The following option_table is used for generating the usage info as well as
* the long and short option information for calling getopt_long().
*/
static ztest_option_t option_table[] = {
{ 'v', "vdevs", "INTEGER", "Number of vdevs", DEFAULT_VDEV_COUNT,
NULL},
{ 's', "vdev-size", "INTEGER", "Size of each vdev",
NO_DEFAULT, DEFAULT_VDEV_SIZE_STR},
{ 'a', "alignment-shift", "INTEGER",
"Alignment shift; use 0 for random", DEFAULT_ASHIFT, NULL},
{ 'm', "mirror-copies", "INTEGER", "Number of mirror copies",
DEFAULT_MIRRORS, NULL},
{ 'r', "raid-disks", "INTEGER", "Number of raidz/draid disks",
DEFAULT_RAID_CHILDREN, NULL},
{ 'R', "raid-parity", "INTEGER", "Raid parity",
DEFAULT_RAID_PARITY, NULL},
{ 'K', "raid-kind", "raidz|draid|random", "Raid kind",
NO_DEFAULT, "random"},
{ 'D', "draid-data", "INTEGER", "Number of draid data drives",
DEFAULT_DRAID_DATA, NULL},
{ 'S', "draid-spares", "INTEGER", "Number of draid spares",
DEFAULT_DRAID_SPARES, NULL},
{ 'd', "datasets", "INTEGER", "Number of datasets",
DEFAULT_DATASETS_COUNT, NULL},
{ 't', "threads", "INTEGER", "Number of ztest threads",
DEFAULT_THREADS, NULL},
{ 'g', "gang-block-threshold", "INTEGER",
"Metaslab gang block threshold",
NO_DEFAULT, DEFAULT_FORCE_GANGING_STR},
{ 'i', "init-count", "INTEGER", "Number of times to initialize pool",
DEFAULT_INITS, NULL},
{ 'k', "kill-percentage", "INTEGER", "Kill percentage",
NO_DEFAULT, DEFAULT_KILLRATE_STR},
{ 'p', "pool-name", "STRING", "Pool name",
NO_DEFAULT, DEFAULT_POOL},
{ 'f', "vdev-file-directory", "PATH", "File directory for vdev files",
NO_DEFAULT, DEFAULT_VDEV_DIR},
{ 'M', "multi-host", NULL,
"Multi-host; simulate pool imported on remote host",
NO_DEFAULT, NULL},
{ 'E', "use-existing-pool", NULL,
"Use existing pool instead of creating new one", NO_DEFAULT, NULL},
{ 'T', "run-time", "INTEGER", "Total run time",
NO_DEFAULT, DEFAULT_RUN_TIME_STR},
{ 'P', "pass-time", "INTEGER", "Time per pass",
NO_DEFAULT, DEFAULT_PASS_TIME_STR},
{ 'F', "freeze-loops", "INTEGER", "Max loops in spa_freeze()",
DEFAULT_MAX_LOOPS, NULL},
{ 'B', "alt-ztest", "PATH", "Alternate ztest path",
NO_DEFAULT, NULL},
{ 'C', "vdev-class-state", "on|off|random", "vdev class state",
NO_DEFAULT, "random"},
{ 'o', "option", "\"OPTION=INTEGER\"",
"Set global variable to an unsigned 32-bit integer value",
NO_DEFAULT, NULL},
{ 'G', "dump-debug-msg", NULL,
"Dump zfs_dbgmsg buffer before exiting due to an error",
NO_DEFAULT, NULL},
{ 'V', "verbose", NULL,
"Verbose (use multiple times for ever more verbosity)",
NO_DEFAULT, NULL},
{ 'h', "help", NULL, "Show this help",
NO_DEFAULT, NULL},
{0, 0, 0, 0, 0, 0}
};
static struct option *long_opts = NULL;
static char *short_opts = NULL;
static void
init_options(void)
{
ASSERT3P(long_opts, ==, NULL);
ASSERT3P(short_opts, ==, NULL);
int count = sizeof (option_table) / sizeof (option_table[0]);
long_opts = umem_alloc(sizeof (struct option) * count, UMEM_NOFAIL);
short_opts = umem_alloc(sizeof (char) * 2 * count, UMEM_NOFAIL);
int short_opt_index = 0;
for (int i = 0; i < count; i++) {
long_opts[i].val = option_table[i].short_opt;
long_opts[i].name = option_table[i].long_opt;
long_opts[i].has_arg = option_table[i].long_opt_param != NULL
? required_argument : no_argument;
long_opts[i].flag = NULL;
short_opts[short_opt_index++] = option_table[i].short_opt;
if (option_table[i].long_opt_param != NULL) {
short_opts[short_opt_index++] = ':';
}
}
}
static void
fini_options(void)
{
int count = sizeof (option_table) / sizeof (option_table[0]);
umem_free(long_opts, sizeof (struct option) * count);
umem_free(short_opts, sizeof (char) * 2 * count);
long_opts = NULL;
short_opts = NULL;
}
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
char option[80];
FILE *fp = requested ? stdout : stderr;
(void) fprintf(fp, "Usage: %s [OPTIONS...]\n", DEFAULT_POOL);
for (int i = 0; option_table[i].short_opt != 0; i++) {
if (option_table[i].long_opt_param != NULL) {
(void) sprintf(option, " -%c --%s=%s",
option_table[i].short_opt,
option_table[i].long_opt,
option_table[i].long_opt_param);
} else {
(void) sprintf(option, " -%c --%s",
option_table[i].short_opt,
option_table[i].long_opt);
}
(void) fprintf(fp, " %-40s%s", option,
option_table[i].comment);
if (option_table[i].long_opt_param != NULL) {
if (option_table[i].default_str != NULL) {
(void) fprintf(fp, " (default: %s)",
option_table[i].default_str);
} else if (option_table[i].default_int != NO_DEFAULT) {
(void) fprintf(fp, " (default: %u)",
option_table[i].default_int);
}
}
(void) fprintf(fp, "\n");
}
exit(requested ? 0 : 1);
}
static uint64_t
ztest_random(uint64_t range)
{
uint64_t r;
ASSERT3S(ztest_fd_rand, >=, 0);
if (range == 0)
return (0);
if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
fatal(B_TRUE, "short read from /dev/urandom");
return (r % range);
}
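/*
 * Note on ztest_random() above: "r % range" has a slight modulo bias,
 * which is negligible for the small ranges used here and acceptable
 * for a stress test.
 */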
static void
ztest_parse_name_value(const char *input, ztest_shared_opts_t *zo)
{
char name[32];
char *value;
int state = ZTEST_VDEV_CLASS_RND;
(void) strlcpy(name, input, sizeof (name));
value = strchr(name, '=');
if (value == NULL) {
(void) fprintf(stderr, "missing value in property=value "
"'-C' argument (%s)\n", input);
usage(B_FALSE);
}
*(value) = '\0';
value++;
if (strcmp(value, "on") == 0) {
state = ZTEST_VDEV_CLASS_ON;
} else if (strcmp(value, "off") == 0) {
state = ZTEST_VDEV_CLASS_OFF;
} else if (strcmp(value, "random") == 0) {
state = ZTEST_VDEV_CLASS_RND;
} else {
(void) fprintf(stderr, "invalid property value '%s'\n", value);
usage(B_FALSE);
}
if (strcmp(name, "special") == 0) {
zo->zo_special_vdevs = state;
} else {
(void) fprintf(stderr, "invalid property name '%s'\n", name);
usage(B_FALSE);
}
if (zo->zo_verbose >= 3)
(void) printf("%s vdev state is '%s'\n", name, value);
}
static void
process_options(int argc, char **argv)
{
char *path;
ztest_shared_opts_t *zo = &ztest_opts;
int opt;
uint64_t value;
const char *raid_kind = "random";
memcpy(zo, &ztest_opts_defaults, sizeof (*zo));
init_options();
while ((opt = getopt_long(argc, argv, short_opts, long_opts,
NULL)) != EOF) {
value = 0;
switch (opt) {
case 'v':
case 's':
case 'a':
case 'm':
case 'r':
case 'R':
case 'D':
case 'S':
case 'd':
case 't':
case 'g':
case 'i':
case 'k':
case 'T':
case 'P':
case 'F':
value = nicenumtoull(optarg);
}
switch (opt) {
case 'v':
zo->zo_vdevs = value;
break;
case 's':
zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
break;
case 'a':
zo->zo_ashift = value;
break;
case 'm':
zo->zo_mirrors = value;
break;
case 'r':
zo->zo_raid_children = MAX(1, value);
break;
case 'R':
zo->zo_raid_parity = MIN(MAX(value, 1), 3);
break;
case 'K':
raid_kind = optarg;
break;
case 'D':
zo->zo_draid_data = MAX(1, value);
break;
case 'S':
zo->zo_draid_spares = MAX(1, value);
break;
case 'd':
zo->zo_datasets = MAX(1, value);
break;
case 't':
zo->zo_threads = MAX(1, value);
break;
case 'g':
zo->zo_metaslab_force_ganging =
MAX(SPA_MINBLOCKSIZE << 1, value);
break;
case 'i':
zo->zo_init = value;
break;
case 'k':
zo->zo_killrate = value;
break;
case 'p':
(void) strlcpy(zo->zo_pool, optarg,
sizeof (zo->zo_pool));
break;
case 'f':
path = realpath(optarg, NULL);
if (path == NULL) {
(void) fprintf(stderr, "error: %s: %s\n",
optarg, strerror(errno));
usage(B_FALSE);
} else {
(void) strlcpy(zo->zo_dir, path,
sizeof (zo->zo_dir));
free(path);
}
break;
case 'M':
zo->zo_mmp_test = 1;
break;
case 'V':
zo->zo_verbose++;
break;
case 'E':
zo->zo_init = 0;
break;
case 'T':
zo->zo_time = value;
break;
case 'P':
zo->zo_passtime = MAX(1, value);
break;
case 'F':
zo->zo_maxloops = MAX(1, value);
break;
case 'B':
(void) strlcpy(zo->zo_alt_ztest, optarg,
sizeof (zo->zo_alt_ztest));
break;
case 'C':
ztest_parse_name_value(optarg, zo);
break;
case 'o':
if (zo->zo_gvars_count >= ZO_GVARS_MAX_COUNT) {
(void) fprintf(stderr,
"max global var count (%zu) exceeded\n",
ZO_GVARS_MAX_COUNT);
usage(B_FALSE);
}
char *v = zo->zo_gvars[zo->zo_gvars_count];
if (strlcpy(v, optarg, ZO_GVARS_MAX_ARGLEN) >=
ZO_GVARS_MAX_ARGLEN) {
(void) fprintf(stderr,
"global var option '%s' is too long\n",
optarg);
usage(B_FALSE);
}
zo->zo_gvars_count++;
break;
case 'G':
zo->zo_dump_dbgmsg = 1;
break;
case 'h':
usage(B_TRUE);
break;
case '?':
default:
usage(B_FALSE);
break;
}
}
fini_options();
/* When the raid choice is 'random', pick dRAID 50% of the time */
if (strcmp(raid_kind, "random") == 0) {
raid_kind = (ztest_random(2) == 0) ? "draid" : "raidz";
if (ztest_opts.zo_verbose >= 3)
(void) printf("choosing RAID type '%s'\n", raid_kind);
}
if (strcmp(raid_kind, "draid") == 0) {
uint64_t min_devsize;
/* With fewer disks use 256M, otherwise 128M is OK */
min_devsize = (ztest_opts.zo_raid_children < 16) ?
(256ULL << 20) : (128ULL << 20);
/* No top-level mirrors with dRAID for now */
zo->zo_mirrors = 0;
/* Use more appropriate defaults for dRAID */
if (zo->zo_vdevs == ztest_opts_defaults.zo_vdevs)
zo->zo_vdevs = 1;
if (zo->zo_raid_children ==
ztest_opts_defaults.zo_raid_children)
zo->zo_raid_children = 16;
if (zo->zo_ashift < 12)
zo->zo_ashift = 12;
if (zo->zo_vdev_size < min_devsize)
zo->zo_vdev_size = min_devsize;
if (zo->zo_draid_data + zo->zo_raid_parity >
zo->zo_raid_children - zo->zo_draid_spares) {
(void) fprintf(stderr, "error: too few draid "
"children (%d) for stripe width (%d)\n",
zo->zo_raid_children,
zo->zo_draid_data + zo->zo_raid_parity);
usage(B_FALSE);
}
(void) strlcpy(zo->zo_raid_type, VDEV_TYPE_DRAID,
sizeof (zo->zo_raid_type));
} else /* using raidz */ {
ASSERT0(strcmp(raid_kind, "raidz"));
zo->zo_raid_parity = MIN(zo->zo_raid_parity,
zo->zo_raid_children - 1);
}
zo->zo_vdevtime =
(zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
UINT64_MAX >> 2);
if (*zo->zo_alt_ztest) {
const char *invalid_what = "ztest";
char *val = zo->zo_alt_ztest;
if (0 != access(val, X_OK) ||
(strrchr(val, '/') == NULL && (errno == EINVAL)))
goto invalid;
int dirlen = strrchr(val, '/') - val;
strlcpy(zo->zo_alt_libpath, val,
MIN(sizeof (zo->zo_alt_libpath), dirlen + 1));
invalid_what = "library path", val = zo->zo_alt_libpath;
if (strrchr(val, '/') == NULL && (errno == EINVAL))
goto invalid;
*strrchr(val, '/') = '\0';
strlcat(val, "/lib", sizeof (zo->zo_alt_libpath));
if (0 != access(zo->zo_alt_libpath, X_OK))
goto invalid;
return;
invalid:
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate %s %s", invalid_what, val);
}
}
static void
ztest_kill(ztest_shared_t *zs)
{
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
/*
* Before we kill ourselves, make sure that the config is updated.
* See comment above spa_write_cachefile().
*/
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE, B_FALSE);
mutex_exit(&spa_namespace_lock);
(void) raise(SIGKILL);
}
static void
ztest_record_enospc(const char *s)
{
(void) s;
ztest_shared->zs_enospc_count++;
}
static uint64_t
ztest_get_ashift(void)
{
if (ztest_opts.zo_ashift == 0)
return (SPA_MINBLOCKSHIFT + ztest_random(5));
return (ztest_opts.zo_ashift);
}
static boolean_t
ztest_is_draid_spare(const char *name)
{
uint64_t spare_id = 0, parity = 0, vdev_id = 0;
if (sscanf(name, VDEV_TYPE_DRAID "%"PRIu64"-%"PRIu64"-%"PRIu64"",
&parity, &vdev_id, &spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
static nvlist_t *
make_vdev_file(const char *path, const char *aux, const char *pool,
size_t size, uint64_t ashift)
{
char *pathbuf = NULL;
uint64_t vdev;
nvlist_t *file;
boolean_t draid_spare = B_FALSE;
if (ashift == 0)
ashift = ztest_get_ashift();
if (path == NULL) {
pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
path = pathbuf;
if (aux != NULL) {
vdev = ztest_shared->zs_vdev_aux;
(void) snprintf(pathbuf, MAXPATHLEN,
ztest_aux_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool,
aux, vdev);
} else {
vdev = ztest_shared->zs_vdev_next_leaf++;
(void) snprintf(pathbuf, MAXPATHLEN,
ztest_dev_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool, vdev);
}
} else {
draid_spare = ztest_is_draid_spare(path);
}
if (size != 0 && !draid_spare) {
int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fd == -1)
fatal(B_TRUE, "can't open %s", path);
if (ftruncate(fd, size) != 0)
fatal(B_TRUE, "can't ftruncate %s", path);
(void) close(fd);
}
file = fnvlist_alloc();
fnvlist_add_string(file, ZPOOL_CONFIG_TYPE,
draid_spare ? VDEV_TYPE_DRAID_SPARE : VDEV_TYPE_FILE);
fnvlist_add_string(file, ZPOOL_CONFIG_PATH, path);
fnvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift);
umem_free(pathbuf, MAXPATHLEN);
return (file);
}
static nvlist_t *
make_vdev_raid(const char *path, const char *aux, const char *pool, size_t size,
uint64_t ashift, int r)
{
nvlist_t *raid, **child;
int c;
if (r < 2)
return (make_vdev_file(path, aux, pool, size, ashift));
child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < r; c++)
child[c] = make_vdev_file(path, aux, pool, size, ashift);
raid = fnvlist_alloc();
fnvlist_add_string(raid, ZPOOL_CONFIG_TYPE,
ztest_opts.zo_raid_type);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_NPARITY,
ztest_opts.zo_raid_parity);
fnvlist_add_nvlist_array(raid, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, r);
if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata = ztest_opts.zo_draid_data;
uint64_t nparity = ztest_opts.zo_raid_parity;
uint64_t nspares = ztest_opts.zo_draid_spares;
uint64_t children = ztest_opts.zo_raid_children;
uint64_t ngroups = 1;
/*
* Calculate the minimum number of groups required to fill a
* slice: the smallest ngroups such that ngroups times the stripe
* width (data + parity) is a multiple of the number of data
* drives (children - spares), i.e. their LCM divided by the
* stripe width.
*/
while (ngroups * (ndata + nparity) % (children - nspares) != 0)
ngroups++;
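/*
 * Worked example with the dRAID defaults applied in
 * process_options() (4 data, 1 parity, 1 spare, 16 children):
 * the stripe width is 5 and there are 15 data drives, so
 * ngroups = 3, since 3 * 5 = 15 is the smallest multiple of
 * the width divisible by 15.
 */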
/* Store the basic dRAID configuration. */
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NDATA, ndata);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
}
for (c = 0; c < r; c++)
fnvlist_free(child[c]);
umem_free(child, r * sizeof (nvlist_t *));
return (raid);
}
static nvlist_t *
make_vdev_mirror(const char *path, const char *aux, const char *pool,
size_t size, uint64_t ashift, int r, int m)
{
nvlist_t *mirror, **child;
int c;
if (m < 1)
return (make_vdev_raid(path, aux, pool, size, ashift, r));
child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < m; c++)
child[c] = make_vdev_raid(path, aux, pool, size, ashift, r);
mirror = fnvlist_alloc();
fnvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, VDEV_TYPE_MIRROR);
fnvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, m);
for (c = 0; c < m; c++)
fnvlist_free(child[c]);
umem_free(child, m * sizeof (nvlist_t *));
return (mirror);
}
static nvlist_t *
make_vdev_root(const char *path, const char *aux, const char *pool, size_t size,
uint64_t ashift, const char *class, int r, int m, int t)
{
nvlist_t *root, **child;
int c;
boolean_t log;
ASSERT3S(t, >, 0);
log = (class != NULL && strcmp(class, "log") == 0);
child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < t; c++) {
child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
r, m);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, log);
if (class != NULL && class[0] != '\0') {
ASSERT(m > 1 || log); /* expecting a mirror */
fnvlist_add_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, class);
}
}
root = fnvlist_alloc();
fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, t);
for (c = 0; c < t; c++)
fnvlist_free(child[c]);
umem_free(child, t * sizeof (nvlist_t *));
return (root);
}
/*
* Find a random spa version. Returns a random spa version in the
* range [initial_version, SPA_VERSION_FEATURES].
*/
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
uint64_t version = initial_version;
if (version <= SPA_VERSION_BEFORE_FEATURES) {
version = version +
ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
}
if (version > SPA_VERSION_BEFORE_FEATURES)
version = SPA_VERSION_FEATURES;
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
return (version);
}
static int
ztest_random_blocksize(void)
{
ASSERT3U(ztest_spa->spa_max_ashift, !=, 0);
/*
* Choose a block size >= the ashift.
* If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks.
*/
int maxbs = SPA_OLD_MAXBLOCKSHIFT;
if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE)
maxbs = 20;
uint64_t block_shift =
ztest_random(maxbs - ztest_spa->spa_max_ashift + 1);
return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}
static int
ztest_random_dnodesize(void)
{
int slots;
int max_slots = spa_maxdnodesize(ztest_spa) >> DNODE_SHIFT;
if (max_slots == DNODE_MIN_SLOTS)
return (DNODE_MIN_SIZE);
/*
* Weight the random distribution more heavily toward smaller
* dnode sizes since that is more likely to reflect real-world
* usage.
*/
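/*
 * Sketch of the resulting distribution: 1 case in 10 picks a large
 * dnode (5 to max_slots slots), 4 in 10 pick 2-4 slots, and the
 * remaining 5 in 10 pick a single slot.
 */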
ASSERT3U(max_slots, >, 4);
switch (ztest_random(10)) {
case 0:
slots = 5 + ztest_random(max_slots - 4);
break;
case 1 ... 4:
slots = 2 + ztest_random(3);
break;
default:
slots = 1;
break;
}
return (slots << DNODE_SHIFT);
}
static int
ztest_random_ibshift(void)
{
return (DN_MIN_INDBLKSHIFT +
ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}
static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
uint64_t top;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *tvd;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
do {
top = ztest_random(rvd->vdev_children);
tvd = rvd->vdev_child[top];
} while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) ||
tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
return (top);
}
static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
uint64_t value;
do {
value = zfs_prop_random_value(prop, ztest_random(-1ULL));
} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
return (value);
}
static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
boolean_t inherit)
{
const char *propname = zfs_prop_to_name(prop);
const char *valname;
char *setpoint;
uint64_t curval;
int error;
error = dsl_prop_set_int(osname, propname,
(inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
if (ztest_opts.zo_verbose >= 6) {
int err;
err = zfs_prop_index_to_string(prop, curval, &valname);
if (err)
(void) printf("%s %s = %llu at '%s'\n", osname,
propname, (unsigned long long)curval, setpoint);
else
(void) printf("%s %s = %s at '%s'\n",
osname, propname, valname, setpoint);
}
umem_free(setpoint, MAXPATHLEN);
return (error);
}
static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
spa_t *spa = ztest_spa;
nvlist_t *props = NULL;
int error;
props = fnvlist_alloc();
fnvlist_add_uint64(props, zpool_prop_to_name(prop), value);
error = spa_prop_set(spa, props);
fnvlist_free(props);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
return (error);
}
static int
ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
int err;
char *cp = NULL;
char ddname[ZFS_MAX_DATASET_NAME_LEN];
strlcpy(ddname, name, sizeof (ddname));
cp = strchr(ddname, '@');
if (cp != NULL)
*cp = '\0';
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
while (decrypt && err == EACCES) {
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
crypto_args, &dcp));
err = spa_keystore_load_wkey(ddname, dcp, B_FALSE);
/*
* Note: if there was an error loading, the wkey was not
* consumed, and needs to be freed.
*/
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err == EINVAL) {
/*
* We couldn't load a key for this dataset so try
* the parent. This loop will eventually hit the
* encryption root since ztest only makes clones
* as children of their origin datasets.
*/
cp = strrchr(ddname, '/');
if (cp == NULL)
return (err);
*cp = '\0';
err = EACCES;
continue;
} else if (err != 0) {
break;
}
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
break;
}
return (err);
}
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT3P(rll->rll_writer, ==, NULL);
ASSERT0(rll->rll_readers);
mutex_destroy(&rll->rll_lock);
cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
mutex_enter(&rll->rll_lock);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT0(rll->rll_readers);
rll->rll_writer = NULL;
} else {
ASSERT3S(rll->rll_readers, >, 0);
ASSERT3P(rll->rll_writer, ==, NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
cv_broadcast(&rll->rll_cv);
mutex_exit(&rll->rll_lock);
}
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_lock(rll, type);
}
static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_unlock(rll);
}
static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
uint64_t size, rl_type_t type)
{
uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
rl_t *rl;
rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
rl->rl_object = object;
rl->rl_offset = offset;
rl->rl_size = size;
rl->rl_lock = rll;
ztest_rll_lock(rll, type);
return (rl);
}
static void
ztest_range_unlock(rl_t *rl)
{
rll_t *rll = rl->rl_lock;
ztest_rll_unlock(rll);
umem_free(rl, sizeof (*rl));
}
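/*
* Editor's sketch (not part of ztest): the bucket computation from
* ztest_range_lock() above, isolated. The mask requires
* ZTEST_RANGE_LOCKS to be a power of two; e.g. with 64 buckets,
* object 5 at offset 192 hashes to 5 ^ (192 % 65) = 59 and maps to
* bucket 59 & 63 = 59.
*/
static uint64_t __maybe_unused
ztest_range_bucket_example(uint64_t object, uint64_t offset)
{
uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
return (hash & (ZTEST_RANGE_LOCKS - 1));
}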
static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
zd->zd_os = os;
zd->zd_zilog = dmu_objset_zil(os);
zd->zd_shared = szd;
dmu_objset_name(os, zd->zd_name);
int l;
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
VERIFY0(pthread_rwlock_init(&zd->zd_zilog_lock, NULL));
mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_rll_init(&zd->zd_range_lock[l]);
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
int l;
mutex_destroy(&zd->zd_dirobj_lock);
(void) pthread_rwlock_destroy(&zd->zd_zilog_lock);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_rll_destroy(&zd->zd_range_lock[l]);
}
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
uint64_t txg;
int error;
/*
* Attempt to assign tx to some transaction group.
*/
error = dmu_tx_assign(tx, txg_how);
if (error) {
if (error == ERESTART) {
ASSERT3U(txg_how, ==, TXG_NOWAIT);
dmu_tx_wait(tx);
} else {
ASSERT3U(error, ==, ENOSPC);
ztest_record_enospc(tag);
}
dmu_tx_abort(tx);
return (0);
}
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
return (txg);
}
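/*
* Editor's sketch (hypothetical caller, not part of ztest): the idiom
* used throughout this file. A return of 0 means ztest_tx_assign()
* already aborted the tx (ERESTART or ENOSPC), so the caller just
* bails out; on success it must eventually dmu_tx_commit().
*/
static int __maybe_unused
ztest_tx_assign_example(objset_t *os, uint64_t object)
{
dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, object);
uint64_t txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
return (ENOSPC); /* tx was aborted for us */
/* ... dirty the held buffers here ... */
dmu_tx_commit(tx);
return (0);
}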
static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
bt->bt_magic = BT_MAGIC;
bt->bt_objset = dmu_objset_id(os);
bt->bt_object = object;
bt->bt_dnodesize = dnodesize;
bt->bt_offset = offset;
bt->bt_gen = gen;
bt->bt_txg = txg;
bt->bt_crtxg = crtxg;
}
static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
ASSERT3U(bt->bt_object, ==, object);
ASSERT3U(bt->bt_dnodesize, ==, dnodesize);
ASSERT3U(bt->bt_offset, ==, offset);
ASSERT3U(bt->bt_gen, <=, gen);
ASSERT3U(bt->bt_txg, <=, txg);
ASSERT3U(bt->bt_crtxg, ==, crtxg);
}
static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
dmu_object_info_t doi;
ztest_block_tag_t *bt;
dmu_object_info_from_db(db, &doi);
ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
return (bt);
}
/*
* Generate a token to fill up unused bonus buffer space. Try to make
* it unique to the object, generation, and offset to verify that data
* is not getting overwritten by data from other dnodes.
*/
#define ZTEST_BONUS_FILL_TOKEN(obj, ds, gen, offset) \
(((ds) << 48) | ((gen) << 32) | ((obj) << 8) | (offset))
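/*
* Editor's worked example: the token packs the dataset id into bits
* 63..48, the generation into 47..32, the object into 31..8 and the
* word offset into the low 8 bits, so ds=2, gen=3, obj=7, offset=5
* yields 0x0002000300000705. Large values may overlap fields; the
* token only needs to be unique enough to catch cross-dnode
* corruption.
*/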
/*
* Fill up the unused bonus buffer region before the block tag with a
* verifiable pattern. Filling the whole bonus area with non-zero data
* helps ensure that all dnode traversal code properly skips the
* interior regions of large dnodes.
*/
static void
ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
ASSERT(IS_P2ALIGNED((char *)end - (char *)db->db_data, 8));
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
*bonusp = token;
}
}
/*
* Verify that the unused area of a bonus buffer is filled with the
* expected tokens.
*/
static void
ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
VERIFY3U(*bonusp, ==, token);
}
}
/*
* ZIL logging ops
*/
#define lrz_type lr_mode
#define lrz_blocksize lr_uid
#define lrz_ibshift lr_gid
#define lrz_bonustype lr_rdev
#define lrz_dnodesize lr_crtime[1]
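/*
* Editor's note: the defines above let ztest smuggle its own creation
* parameters (object type, block size, indirect block shift, bonus
* type, dnode size) through otherwise-unused lr_create_t fields.
*/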
static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
itx->itx_oid = object;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
itx_t *itx;
itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
if (zil_replaying(zd->zd_zilog, tx))
return;
if (lr->lr_length > zil_max_log_data(zd->zd_zilog, sizeof (lr_write_t)))
write_state = WR_INDIRECT;
itx = zil_itx_create(TX_WRITE,
sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
if (write_state == WR_COPIED &&
dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
write_state = WR_NEED_COPY;
}
itx->itx_private = zd;
itx->itx_wr_state = write_state;
itx->itx_sync = (ztest_random(8) == 0);
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
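/*
* Editor's note on the memcpy idiom above: `&itx->itx_lr + 1` and
* `&lr->lr_common + 1` both point just past an embedded lr_t header,
* so the copy transfers only the type-specific record body (plus any
* trailing name); zil_itx_create() has already filled in the common
* header itself.
*/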
/*
* ZIL replay ops
*/
static int
ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_create_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
ztest_block_tag_t *bbt;
dmu_buf_t *db;
dmu_tx_t *tx;
uint64_t txg;
int error = 0;
int bonuslen;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
ASSERT3S(name[0], !=, '\0');
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
} else {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
}
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0)
return (ENOSPC);
ASSERT3U(dmu_objset_zil(os)->zl_replay, ==, !!lr->lr_foid);
bonuslen = DN_BONUS_SIZE(lr->lrz_dnodesize);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
if (lr->lr_foid == 0) {
lr->lr_foid = zap_create_dnsize(os,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = zap_create_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
} else {
if (lr->lr_foid == 0) {
lr->lr_foid = dmu_object_alloc_dnsize(os,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = dmu_object_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
}
if (error) {
ASSERT3U(error, ==, EEXIST);
ASSERT(zd->zd_zilog->zl_replay);
dmu_tx_commit(tx);
return (error);
}
ASSERT3U(lr->lr_foid, !=, 0);
if (lr->lrz_type != DMU_OT_ZAP_OTHER)
VERIFY0(dmu_object_set_blocksize(os, lr->lr_foid,
lr->lrz_blocksize, lr->lrz_ibshift, tx));
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
bbt = ztest_bt_bonus(db);
dmu_buf_will_dirty(db, tx);
ztest_bt_generate(bbt, os, lr->lr_foid, lr->lrz_dnodesize, -1ULL,
lr->lr_gen, txg, txg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, lr->lr_gen);
dmu_buf_rele(db, FTAG);
VERIFY0(zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
&lr->lr_foid, tx));
(void) ztest_log_create(zd, tx, lr);
dmu_tx_commit(tx);
return (0);
}
static int
ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_remove_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
dmu_object_info_t doi;
dmu_tx_t *tx;
uint64_t object, txg;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
ASSERT3S(name[0], !=, '\0');
VERIFY0(
zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
ASSERT3U(object, !=, 0);
ztest_object_lock(zd, object, RL_WRITER);
VERIFY0(dmu_object_info(os, object, &doi));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_object_unlock(zd, object);
return (ENOSPC);
}
if (doi.doi_type == DMU_OT_ZAP_OTHER) {
VERIFY0(zap_destroy(os, object, tx));
} else {
VERIFY0(dmu_object_free(os, object, tx));
}
VERIFY0(zap_remove(os, lr->lr_doid, name, tx));
(void) ztest_log_remove(zd, tx, lr, object);
dmu_tx_commit(tx);
ztest_object_unlock(zd, object);
return (0);
}
static int
ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_write_t *lr = arg2;
objset_t *os = zd->zd_os;
void *data = lr + 1; /* data follows lr */
uint64_t offset, length;
ztest_block_tag_t *bt = data;
ztest_block_tag_t *bbt;
uint64_t gen, txg, lrtxg, crtxg;
dmu_object_info_t doi;
dmu_tx_t *tx;
dmu_buf_t *db;
arc_buf_t *abuf = NULL;
rl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
if (bt->bt_magic == BSWAP_64(BT_MAGIC))
byteswap_uint64_array(bt, sizeof (*bt));
if (bt->bt_magic != BT_MAGIC)
bt = NULL;
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
gen = bbt->bt_gen;
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
P2PHASE(offset, length) == 0)
abuf = dmu_request_arcbuf(db, length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
dmu_buf_rele(db, FTAG);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
if (bt != NULL) {
/*
* Usually, verify the old data before writing new data --
* but not always, because we also want to verify correct
* behavior when the data was not recently read into cache.
*/
ASSERT(doi.doi_data_block_size);
ASSERT0(offset % doi.doi_data_block_size);
if (ztest_random(4) != 0) {
int prefetch = ztest_random(2) ?
DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
ztest_block_tag_t rbt;
VERIFY(dmu_read(os, lr->lr_foid, offset,
sizeof (rbt), &rbt, prefetch) == 0);
if (rbt.bt_magic == BT_MAGIC) {
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);
}
}
/*
* Writes can appear to be newer than the bonus buffer because
* the ztest_get_data() callback does a dmu_read() of the
* open-context data, which may be different from the data
* as it was when the write was generated.
*/
if (zd->zd_zilog->zl_replay) {
ztest_bt_verify(bt, os, lr->lr_foid, 0, offset,
MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
bt->bt_crtxg);
}
/*
* Set the bt's gen/txg to the bonus buffer's gen/txg
* so that all of the usual ASSERTs will work.
*/
ztest_bt_generate(bt, os, lr->lr_foid, 0, offset, gen, txg,
crtxg);
}
if (abuf == NULL) {
dmu_write(os, lr->lr_foid, offset, length, data, tx);
} else {
memcpy(abuf->b_data, data, length);
VERIFY0(dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx));
}
(void) ztest_log_write(zd, tx, lr);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_truncate_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
rl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
VERIFY0(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
lr->lr_length, tx));
(void) ztest_log_truncate(zd, tx, lr);
dmu_tx_commit(tx);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_setattr_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
dmu_buf_t *db;
ztest_block_tag_t *bbt;
uint64_t txg, lrtxg, crtxg, dnodesize;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, lr->lr_foid);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
dnodesize = bbt->bt_dnodesize;
if (zd->zd_zilog->zl_replay) {
ASSERT3U(lr->lr_size, !=, 0);
ASSERT3U(lr->lr_mode, !=, 0);
ASSERT3U(lrtxg, !=, 0);
} else {
/*
* Randomly change the size and increment the generation.
*/
lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
sizeof (*bbt);
lr->lr_mode = bbt->bt_gen + 1;
ASSERT0(lrtxg);
}
/*
* Verify that the current bonus buffer is not newer than our txg.
*/
ztest_bt_verify(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
MAX(txg, lrtxg), crtxg);
dmu_buf_will_dirty(db, tx);
ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
ASSERT3U(lr->lr_size, <=, db->db_size);
VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
bbt = ztest_bt_bonus(db);
ztest_bt_generate(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
txg, crtxg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, bbt->bt_gen);
dmu_buf_rele(db, FTAG);
(void) ztest_log_setattr(zd, tx, lr);
dmu_tx_commit(tx);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
NULL, /* 0 no such transaction type */
ztest_replay_create, /* TX_CREATE */
NULL, /* TX_MKDIR */
NULL, /* TX_MKXATTR */
NULL, /* TX_SYMLINK */
ztest_replay_remove, /* TX_REMOVE */
NULL, /* TX_RMDIR */
NULL, /* TX_LINK */
NULL, /* TX_RENAME */
ztest_replay_write, /* TX_WRITE */
ztest_replay_truncate, /* TX_TRUNCATE */
ztest_replay_setattr, /* TX_SETATTR */
NULL, /* TX_ACL */
NULL, /* TX_CREATE_ACL */
NULL, /* TX_CREATE_ATTR */
NULL, /* TX_CREATE_ACL_ATTR */
NULL, /* TX_MKDIR_ACL */
NULL, /* TX_MKDIR_ATTR */
NULL, /* TX_MKDIR_ACL_ATTR */
NULL, /* TX_WRITE2 */
NULL, /* TX_SETSAXATTR */
NULL, /* TX_RENAME_EXCHANGE */
NULL, /* TX_RENAME_WHITEOUT */
};
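/*
* Editor's sketch (assumed dispatch, inferred from the table above):
* during zil_replay() each record's common-header type selects the
* handler, roughly:
*
* zil_replay_func_t *fn = ztest_replay_vector[lrc_txtype];
* if (fn != NULL)
* error = fn(zd, lr, byteswap);
*
* Only TX_CREATE, TX_REMOVE, TX_WRITE, TX_TRUNCATE and TX_SETATTR are
* handled; ztest never generates the record types left NULL.
*/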
/*
* ZIL get_data callbacks
*/
static void
ztest_get_done(zgd_t *zgd, int error)
{
(void) error;
ztest_ds_t *zd = zgd->zgd_private;
uint64_t object = ((rl_t *)zgd->zgd_lr)->rl_object;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
ztest_range_unlock((rl_t *)zgd->zgd_lr);
ztest_object_unlock(zd, object);
umem_free(zgd, sizeof (*zgd));
}
static int
ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
(void) arg2;
ztest_ds_t *zd = arg;
objset_t *os = zd->zd_os;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
uint64_t txg = lr->lr_common.lrc_txg;
uint64_t crtxg;
dmu_object_info_t doi;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
ztest_object_lock(zd, object, RL_READER);
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error) {
ztest_object_unlock(zd, object);
return (error);
}
crtxg = ztest_bt_bonus(db)->bt_crtxg;
if (crtxg == 0 || crtxg > txg) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, object);
return (ENOENT);
}
dmu_object_info_from_db(db, &doi);
dmu_buf_rele(db, FTAG);
db = NULL;
zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zd;
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
ASSERT0(error);
} else {
+ ASSERT3P(zio, !=, NULL);
size = doi.doi_data_block_size;
if (ISP2(size)) {
offset = P2ALIGN(offset, size);
} else {
ASSERT3U(offset, <, size);
offset = 0;
}
zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT3U(db->db_offset, ==, offset);
ASSERT3U(db->db_size, ==, size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
ztest_get_done, zgd);
if (error == 0)
return (0);
}
}
ztest_get_done(zgd, error);
return (error);
}
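/*
* Editor's note: ownership of zgd above is subtle. If dmu_sync() is
* issued successfully, ztest_get_done() runs later from the sync
* callback and frees zgd; on every other path this function calls
* ztest_get_done() itself before returning, so the range lock, the
* object lock and any dbuf hold are released exactly once.
*/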
static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
char *lr;
size_t namesize = name ? strlen(name) + 1 : 0;
lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
if (name)
memcpy(lr + lrsize, name, namesize);
return (lr);
}
static void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
size_t namesize = name ? strlen(name) + 1 : 0;
umem_free(lr, lrsize + namesize);
}
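/*
* Editor's sketch (not part of ztest): ztest_lr_alloc() appends the
* NUL-terminated name directly after the record, matching the "name
* follows lr" layout the replay functions above rely on.
*/
static void __maybe_unused
ztest_lr_alloc_example(void)
{
char name[] = "obj0";
lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), name);
ASSERT0(strcmp((char *)(lr + 1), name));
ztest_lr_free(lr, sizeof (*lr), name);
}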
/*
* Lookup a bunch of objects. Returns the number of objects not found.
*/
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
if (error) {
ASSERT3S(error, ==, ENOENT);
ASSERT0(od->od_object);
missing++;
} else {
dmu_buf_t *db;
ztest_block_tag_t *bbt;
dmu_object_info_t doi;
ASSERT3U(od->od_object, !=, 0);
ASSERT0(missing); /* there should be no gaps */
ztest_object_lock(zd, od->od_object, RL_READER);
VERIFY0(dmu_bonus_hold(zd->zd_os, od->od_object,
FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
od->od_type = doi.doi_type;
od->od_blocksize = doi.doi_data_block_size;
od->od_gen = bbt->bt_gen;
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, od->od_object);
}
}
return (missing);
}
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
continue;
}
lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
lr->lrz_type = od->od_crtype;
lr->lrz_blocksize = od->od_crblocksize;
lr->lrz_ibshift = ztest_random_ibshift();
lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
lr->lrz_dnodesize = od->od_crdnodesize;
lr->lr_gen = od->od_crgen;
lr->lr_crtime[0] = time(NULL);
if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
ASSERT0(missing);
od->od_object = 0;
missing++;
} else {
od->od_object = lr->lr_foid;
od->od_type = od->od_crtype;
od->od_blocksize = od->od_crblocksize;
od->od_gen = od->od_crgen;
ASSERT3U(od->od_object, !=, 0);
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
for (i = count - 1; i >= 0; i--, od--) {
if (missing) {
missing++;
continue;
}
/*
* No object was found.
*/
if (od->od_object == 0)
continue;
lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
ASSERT3U(error, ==, ENOSPC);
missing++;
} else {
od->od_object = 0;
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
void *data)
{
lr_write_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
memcpy(lr + 1, data, size);
error = ztest_replay_write(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr) + size, NULL);
return (error);
}
static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
lr_truncate_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
error = ztest_replay_truncate(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
lr_setattr_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_size = 0;
lr->lr_mode = 0;
error = ztest_replay_setattr(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
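/*
* Editor's note: ztest_write(), ztest_truncate() and ztest_setattr()
* above (like ztest_create() and ztest_remove() before them) build a
* log record in memory and feed it to the matching ztest_replay_*()
* function, so normal operation and ZIL replay exercise exactly the
* same code paths.
*/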
static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
rl_t *rl;
txg_wait_synced(dmu_objset_pool(os), 0);
ztest_object_lock(zd, object, RL_READER);
rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, offset, size);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg != 0) {
dmu_prealloc(os, object, offset, size, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
} else {
(void) dmu_free_long_range(os, object, offset, size);
}
ztest_range_unlock(rl);
ztest_object_unlock(zd, object);
}
static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
int err;
ztest_block_tag_t wbt;
dmu_object_info_t doi;
enum ztest_io_type io_type;
uint64_t blocksize;
void *data;
VERIFY0(dmu_object_info(zd->zd_os, object, &doi));
blocksize = doi.doi_data_block_size;
data = umem_alloc(blocksize, UMEM_NOFAIL);
/*
* Pick an i/o type at random, biased toward writing block tags.
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, doi.doi_dnodesize,
offset, 0, 0, 0);
(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
break;
case ZTEST_IO_WRITE_PATTERN:
(void) memset(data, 'a' + (object + offset) % 5, blocksize);
if (ztest_random(2) == 0) {
/*
* Induce fletcher2 collisions to ensure that
* zio_ddt_collision() detects and resolves them
* when using fletcher2-verify for deduplication.
*/
((uint64_t *)data)[0] ^= 1ULL << 63;
((uint64_t *)data)[4] ^= 1ULL << 63;
}
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_WRITE_ZEROES:
memset(data, 0, blocksize);
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_TRUNCATE:
(void) ztest_truncate(zd, object, offset, blocksize);
break;
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
case ZTEST_IO_REWRITE:
(void) pthread_rwlock_rdlock(&ztest_name_lock);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
ASSERT(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
ASSERT(err == 0 || err == ENOSPC);
(void) pthread_rwlock_unlock(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
default:
break;
}
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
/*
* Initialize an object description template.
*/
static void
ztest_od_init(ztest_od_t *od, uint64_t id, const char *tag, uint64_t index,
dmu_object_type_t type, uint64_t blocksize, uint64_t dnodesize,
uint64_t gen)
{
od->od_dir = ZTEST_DIROBJ;
od->od_object = 0;
od->od_crtype = type;
od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
od->od_crdnodesize = dnodesize ? dnodesize : ztest_random_dnodesize();
od->od_crgen = gen;
od->od_type = DMU_OT_NONE;
od->od_blocksize = 0;
od->od_gen = 0;
(void) snprintf(od->od_name, sizeof (od->od_name),
"%s(%"PRId64")[%"PRIu64"]",
tag, id, index);
}
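/*
* Editor's worked example: for tag "foo", id 3 and index 7 the
* template above yields the object name "foo(3)[7]".
*/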
/*
* Lookup or create the objects for a test using the od template.
* If the objects do not all exist, or if 'remove' is specified,
* remove any existing objects and create new ones. Otherwise,
* use the existing objects.
*/
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
(void) id;
zilog_t *zilog = zd->zd_zilog;
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
* shared memory. If we die, the next iteration of ztest_run()
* will verify that the log really does contain this record.
*/
mutex_enter(&zilog->zl_lock);
ASSERT3P(zd->zd_shared, !=, NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
* attempt to expose any implicit assumptions about ZIL management.
*/
void
ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
/*
* We hold the ztest_vdev_lock so we don't cause problems with
* other threads that wish to remove a log device, such as
* ztest_device_removal().
*/
mutex_enter(&ztest_vdev_lock);
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
mutex_enter(&zd->zd_dirobj_lock);
(void) pthread_rwlock_wrlock(&zd->zd_zilog_lock);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY3P(zil_open(os, ztest_get_data, NULL), ==, zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
mutex_exit(&zd->zd_dirobj_lock);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
*/
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa;
nvlist_t *nvroot;
if (zo->zo_mmp_test)
return;
/*
* Attempt to create using a bad file.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* Attempt to create using a bad mirror.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 2, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
(void) pthread_rwlock_rdlock(&ztest_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
VERIFY3U(EEXIST, ==,
spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* We open a reference to the spa and then we try to export it
* expecting one of the following errors:
*
* EBUSY
* Because of the reference we just opened.
*
* ZFS_ERR_EXPORT_IN_PROGRESS
* In case another ztest thread is doing
* an export concurrently.
*/
VERIFY0(spa_open(zo->zo_pool, &spa, FTAG));
int error = spa_destroy(zo->zo_pool);
if (error != EBUSY && error != ZFS_ERR_EXPORT_IN_PROGRESS) {
fatal(B_FALSE, "spa_destroy(%s) returned unexpected value %d",
spa->spa_name, error);
}
spa_close(spa, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Start and then stop the MMP threads to ensure the startup and shutdown code
* works properly. Actual protection and property-related code is tested via ZTS.
*/
void
ztest_mmp_enable_disable(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa = ztest_spa;
if (zo->zo_mmp_test)
return;
/*
* Since enabling MMP involves setting a property, it cannot be done
* while the pool is suspended.
*/
if (spa_suspended(spa))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
zfs_multihost_fail_intervals = 0;
if (!spa_multihost(spa)) {
spa->spa_multihost = B_TRUE;
mmp_thread_start(spa);
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
mmp_signal_all_threads();
txg_wait_synced(spa_get_dsl(spa), 0);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
if (spa_multihost(spa)) {
mmp_thread_stop(spa);
spa->spa_multihost = B_FALSE;
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa;
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
if (ztest_opts.zo_mmp_test)
return;
/* dRAID added after feature flags, skip upgrade test. */
if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0)
return;
mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
(void) spa_destroy(name);
nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, ztest_opts.zo_mirrors, 1);
/*
* If we're configuring a RAIDZ device then make sure that the
* initial version is capable of supporting that feature.
*/
switch (ztest_opts.zo_raid_parity) {
case 0:
case 1:
initial_version = SPA_VERSION_INITIAL;
break;
case 2:
initial_version = SPA_VERSION_RAIDZ2;
break;
case 3:
initial_version = SPA_VERSION_RAIDZ3;
break;
}
/*
* Create a pool with a spa version that can be upgraded. Pick
* a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
*/
do {
version = ztest_random_spa_version(initial_version);
} while (version > SPA_VERSION_BEFORE_FEATURES);
props = fnvlist_alloc();
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
VERIFY0(spa_create(name, nvroot, props, NULL, NULL));
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY0(spa_open(name, &spa, FTAG));
VERIFY3U(spa_version(spa), ==, version);
newversion = ztest_random_spa_version(version + 1);
if (ztest_opts.zo_verbose >= 4) {
(void) printf("upgrading spa version from "
"%"PRIu64" to %"PRIu64"\n",
version, newversion);
}
spa_upgrade(spa, newversion);
VERIFY3U(spa_version(spa), >, version);
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
kmem_strfree(name);
mutex_exit(&ztest_vdev_lock);
}
static void
ztest_spa_checkpoint(spa_t *spa)
{
ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));
int error = spa_checkpoint(spa->spa_name);
switch (error) {
case 0:
case ZFS_ERR_DEVRM_IN_PROGRESS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
case ZFS_ERR_CHECKPOINT_EXISTS:
break;
case ENOSPC:
ztest_record_enospc(FTAG);
break;
default:
fatal(B_FALSE, "spa_checkpoint(%s) = %d", spa->spa_name, error);
}
}
static void
ztest_spa_discard_checkpoint(spa_t *spa)
{
ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));
int error = spa_checkpoint_discard(spa->spa_name);
switch (error) {
case 0:
case ZFS_ERR_DISCARDING_CHECKPOINT:
case ZFS_ERR_NO_CHECKPOINT:
break;
default:
fatal(B_FALSE, "spa_discard_checkpoint(%s) = %d",
spa->spa_name, error);
}
}
void
ztest_spa_checkpoint_create_discard(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
mutex_enter(&ztest_checkpoint_lock);
if (ztest_random(2) == 0) {
ztest_spa_checkpoint(spa);
} else {
ztest_spa_discard_checkpoint(spa);
}
mutex_exit(&ztest_checkpoint_lock);
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
vdev_t *mvd;
int c;
if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
return (vd);
for (c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
NULL)
return (mvd);
return (NULL);
}
static int
spa_num_top_vdevs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT3U(spa_config_held(spa, SCL_VDEV, RW_READER), ==, SCL_VDEV);
return (rvd->vdev_children);
}
/*
* Verify that vdev_add() works as expected.
*/
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
/*
* If we have slogs then remove one of them 1/4 of the time.
*/
if (spa_has_slogs(spa) && ztest_random(4) == 0) {
metaslab_group_t *mg;
/*
* Find the first real slog in the log allocation class.
*/
mg = spa_log_class(spa)->mc_allocator[0].mca_rotor;
while (!mg->mg_vd->vdev_islog)
mg = mg->mg_next;
guid = mg->mg_vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* We have to grab the ztest_name_lock as writer to
* prevent a race between removing a slog (dmu_objset_find)
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_vdev_remove(spa, guid, B_FALSE);
pthread_rwlock_unlock(&ztest_name_lock);
switch (error) {
case 0:
case EEXIST: /* Generic zil_reset() error */
case EBUSY: /* Replay required */
case EACCES: /* Crypto key not loaded */
case ZFS_ERR_CHECKPOINT_EXISTS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
fatal(B_FALSE, "spa_vdev_remove() = %d", error);
}
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* Make 1/4 of the devices be log devices
*/
nvroot = make_vdev_root(NULL, NULL, NULL,
ztest_opts.zo_vdev_size, 0, (ztest_random(4) == 0) ?
"log" : NULL, ztest_opts.zo_raid_children, zs->zs_mirrors,
1);
error = spa_vdev_add(spa, nvroot);
fnvlist_free(nvroot);
switch (error) {
case 0:
break;
case ENOSPC:
ztest_record_enospc("spa_vdev_add");
break;
default:
fatal(B_FALSE, "spa_vdev_add() = %d", error);
}
}
mutex_exit(&ztest_vdev_lock);
}
void
ztest_vdev_class_add(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
nvlist_t *nvroot;
const char *class = (ztest_random(2) == 0) ?
VDEV_ALLOC_BIAS_SPECIAL : VDEV_ALLOC_BIAS_DEDUP;
int error;
/*
* By default add a special vdev 50% of the time
*/
if ((ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_OFF) ||
(ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_RND &&
ztest_random(2) == 0)) {
return;
}
mutex_enter(&ztest_vdev_lock);
/* Only test with mirrors */
if (zs->zs_mirrors < 2) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* requires feature@allocation_classes */
if (!spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)) {
mutex_exit(&ztest_vdev_lock);
return;
}
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
spa_config_exit(spa, SCL_VDEV, FTAG);
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
class, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
error = spa_vdev_add(spa, nvroot);
fnvlist_free(nvroot);
if (error == ENOSPC)
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
fatal(B_FALSE, "spa_vdev_add() = %d", error);
/*
* 50% of the time allow small blocks in the special class
*/
if (error == 0 &&
spa_special_class(spa)->mc_groups == 1 && ztest_random(2) == 0) {
if (ztest_opts.zo_verbose >= 3)
(void) printf("Enabling special VDEV small blocks\n");
error = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_SPECIAL_SMALL_BLOCKS, 32768, B_FALSE);
ASSERT(error == 0 || error == ENOSPC);
}
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 3) {
metaslab_class_t *mc;
if (strcmp(class, VDEV_ALLOC_BIAS_SPECIAL) == 0)
mc = spa_special_class(spa);
else
mc = spa_dedup_class(spa);
(void) printf("Added a %s mirrored vdev (of %d)\n",
class, (int)mc->mc_groups);
}
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
spa_aux_vdev_t *sav;
const char *aux;
char *path;
uint64_t guid = 0;
int error, ignore_err = 0;
if (ztest_opts.zo_mmp_test)
return;
path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
if (ztest_random(2) == 0) {
sav = &spa->spa_spares;
aux = ZPOOL_CONFIG_SPARES;
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
* Pick a random device to remove.
*/
vdev_t *svd = sav->sav_vdevs[ztest_random(sav->sav_count)];
/* dRAID spares cannot be removed; try anyway to see ENOTSUP */
if (strstr(svd->vdev_path, VDEV_TYPE_DRAID) != NULL)
ignore_err = ENOTSUP;
guid = svd->vdev_guid;
} else {
/*
* Find an unused device we can add.
*/
zs->zs_vdev_aux = 0;
for (;;) {
int c;
(void) snprintf(path, MAXPATHLEN, ztest_aux_template,
ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
zs->zs_vdev_aux);
for (c = 0; c < sav->sav_count; c++)
if (strcmp(sav->sav_vdevs[c]->vdev_path,
path) == 0)
break;
if (c == sav->sav_count &&
vdev_lookup_by_path(rvd, path) == NULL)
break;
zs->zs_vdev_aux++;
}
}
spa_config_exit(spa, SCL_VDEV, FTAG);
if (guid == 0) {
/*
* Add a new device.
*/
nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
(ztest_opts.zo_vdev_size * 5) / 4, 0, NULL, 0, 0, 1);
error = spa_vdev_add(spa, nvroot);
switch (error) {
case 0:
break;
default:
fatal(B_FALSE, "spa_vdev_add(%p) = %d", nvroot, error);
}
fnvlist_free(nvroot);
} else {
/*
* Remove an existing device. Sometimes, dirty its
* vdev state first to make sure we handle removal
* of devices that have pending state changes.
*/
if (ztest_random(2) == 0)
(void) vdev_online(spa, guid, 0, NULL);
error = spa_vdev_remove(spa, guid, B_FALSE);
switch (error) {
case 0:
case EBUSY:
case ZFS_ERR_CHECKPOINT_EXISTS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
if (error != ignore_err)
fatal(B_FALSE,
"spa_vdev_remove(%"PRIu64") = %d",
guid, error);
}
}
mutex_exit(&ztest_vdev_lock);
umem_free(path, MAXPATHLEN);
}
/*
* Split a pool if it has mirror top-level vdevs (tlvdevs).
*/
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raid_children > 1) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* generate a config from the existing config */
mutex_enter(&spa->spa_props_lock);
tree = fnvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE);
mutex_exit(&spa->spa_props_lock);
VERIFY0(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&child, &children));
schild = umem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
UMEM_NOFAIL);
for (c = 0; c < children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
nvlist_t **mchild;
uint_t mchildren;
if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
schild[schildren] = fnvlist_alloc();
fnvlist_add_string(schild[schildren],
ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE);
fnvlist_add_uint64(schild[schildren],
ZPOOL_CONFIG_IS_HOLE, 1);
if (lastlogid == 0)
lastlogid = schildren;
++schildren;
continue;
}
lastlogid = 0;
VERIFY0(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren));
schild[schildren++] = fnvlist_dup(mchild[0]);
}
/* OK, create a config that can be used to split */
split = fnvlist_alloc();
fnvlist_add_string(split, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)schild, lastlogid != 0 ? lastlogid : schildren);
config = fnvlist_alloc();
fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split);
for (c = 0; c < schildren; c++)
fnvlist_free(schild[c]);
umem_free(schild, rvd->vdev_children * sizeof (nvlist_t *));
fnvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
(void) pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
(void) pthread_rwlock_unlock(&ztest_name_lock);
fnvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
mutex_enter(&spa_namespace_lock);
show_pool_stats(spa);
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can attach and detach devices.
*/
void
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *pvd;
nvlist_t *root;
uint64_t leaves;
uint64_t leaf, top;
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
uint64_t oldsize, newsize;
char *oldpath, *newpath;
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int newvd_is_dspare = B_FALSE;
int oldvd_is_log;
int oldvd_is_special;
int error, expected_error;
if (ztest_opts.zo_mmp_test)
return;
oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* If a vdev is in the process of being removed, its removal may
* finish while we are in progress, leading to an unexpected error
* value. Don't bother trying to attach while we are in the middle
* of removal.
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_ALL, FTAG);
goto out;
}
/*
* Decide whether to do an attach or a replace.
*/
replacing = ztest_random(2);
/*
* Pick a random top-level vdev.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
/*
* Pick a random leaf within it.
*/
leaf = ztest_random(leaves);
/*
* Locate this vdev.
*/
oldvd = rvd->vdev_child[top];
/* pick a child from the mirror */
if (zs->zs_mirrors >= 1) {
ASSERT3P(oldvd->vdev_ops, ==, &vdev_mirror_ops);
ASSERT3U(oldvd->vdev_children, >=, zs->zs_mirrors);
oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raid_children];
}
/* pick a child out of the raidz group */
if (ztest_opts.zo_raid_children > 1) {
if (strcmp(oldvd->vdev_ops->vdev_op_type, "raidz") == 0)
ASSERT3P(oldvd->vdev_ops, ==, &vdev_raidz_ops);
else
ASSERT3P(oldvd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(oldvd->vdev_children, ==, ztest_opts.zo_raid_children);
oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raid_children];
}
/*
* If we're already doing an attach or replace, oldvd may be a
* mirror vdev -- in which case, pick a random child.
*/
while (oldvd->vdev_children != 0) {
oldvd_has_siblings = B_TRUE;
ASSERT3U(oldvd->vdev_children, >=, 2);
oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
}
oldguid = oldvd->vdev_guid;
oldsize = vdev_get_min_asize(oldvd);
oldvd_is_log = oldvd->vdev_top->vdev_islog;
oldvd_is_special =
oldvd->vdev_top->vdev_alloc_bias == VDEV_BIAS_SPECIAL ||
oldvd->vdev_top->vdev_alloc_bias == VDEV_BIAS_DEDUP;
(void) strlcpy(oldpath, oldvd->vdev_path, MAXPATHLEN);
pvd = oldvd->vdev_parent;
pguid = pvd->vdev_guid;
/*
* If oldvd has siblings, then half of the time, detach it. Prior
* to the detach the pool is scrubbed in order to prevent creating
* unrepairable blocks as a result of the data corruption injection.
*/
if (oldvd_has_siblings && ztest_random(2) == 0) {
spa_config_exit(spa, SCL_ALL, FTAG);
error = ztest_scrub_impl(spa);
if (error)
goto out;
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS &&
error != ZFS_ERR_DISCARDING_CHECKPOINT)
fatal(B_FALSE, "detach (%s) returned %d",
oldpath, error);
goto out;
}
/*
* For the new vdev, choose with equal probability between the two
* standard paths (ending in either 'a' or 'b') or a random hot spare.
*/
if (sav->sav_count != 0 && ztest_random(3) == 0) {
newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
newvd_is_spare = B_TRUE;
if (newvd->vdev_ops == &vdev_draid_spare_ops)
newvd_is_dspare = B_TRUE;
(void) strlcpy(newpath, newvd->vdev_path, MAXPATHLEN);
} else {
(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
if (ztest_random(2) == 0)
newpath[strlen(newpath) - 1] = 'b';
newvd = vdev_lookup_by_path(rvd, newpath);
}
if (newvd) {
/*
* Reopen to ensure the vdev's asize field isn't stale.
*/
vdev_reopen(newvd);
newsize = vdev_get_min_asize(newvd);
} else {
/*
* Make newsize a little bigger or smaller than oldsize.
* If it's smaller, the attach should fail.
* If it's larger, and we're doing a replace,
* we should get dynamic LUN growth when we're done.
*/
newsize = 10 * oldsize / (9 + ztest_random(3));
}
/*
* If pvd is not a mirror or root, the attach should fail with ENOTSUP,
* unless it's a replace; in that case any non-replacing parent is OK.
*
* If newvd is already part of the pool, it should fail with EBUSY.
*
* If newvd is too small, it should fail with EOVERFLOW.
*
* If newvd is a distributed spare and it's being attached to a
* dRAID which is not its parent it should fail with EINVAL.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops && (!replacing ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_spare_ops))
expected_error = ENOTSUP;
else if (newvd_is_spare &&
(!replacing || oldvd_is_log || oldvd_is_special))
expected_error = ENOTSUP;
else if (newvd == oldvd)
expected_error = replacing ? 0 : EBUSY;
else if (vdev_lookup_by_path(rvd, newpath) != NULL)
expected_error = EBUSY;
else if (!newvd_is_dspare && newsize < oldsize)
expected_error = EOVERFLOW;
else if (ashift > oldvd->vdev_top->vdev_ashift)
expected_error = EDOM;
else if (newvd_is_dspare && pvd != vdev_draid_spare_get_parent(newvd))
expected_error = ENOTSUP;
else
expected_error = 0;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Build the nvlist describing newpath.
*/
root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
ashift, NULL, 0, 0, 1);
/*
* When supported select either a healing or sequential resilver.
*/
boolean_t rebuilding = B_FALSE;
if (pvd->vdev_ops == &vdev_mirror_ops ||
pvd->vdev_ops == &vdev_root_ops) {
rebuilding = !!ztest_random(2);
}
error = spa_vdev_attach(spa, oldguid, root, replacing, rebuilding);
fnvlist_free(root);
/*
* If our parent was the replacing vdev, but the replace completed,
* then instead of failing with ENOTSUP we may either succeed,
* fail with ENODEV, or fail with EOVERFLOW.
*/
if (expected_error == ENOTSUP &&
(error == 0 || error == ENODEV || error == EOVERFLOW))
expected_error = error;
/*
* If someone grew the LUN, the replacement may be too small.
*/
if (error == EOVERFLOW || error == EBUSY)
expected_error = error;
if (error == ZFS_ERR_CHECKPOINT_EXISTS ||
error == ZFS_ERR_DISCARDING_CHECKPOINT ||
error == ZFS_ERR_RESILVER_IN_PROGRESS ||
error == ZFS_ERR_REBUILD_IN_PROGRESS)
expected_error = error;
if (error != expected_error && expected_error != EBUSY) {
fatal(B_FALSE, "attach (%s %"PRIu64", %s %"PRIu64", %d) "
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
out:
mutex_exit(&ztest_vdev_lock);
umem_free(oldpath, MAXPATHLEN);
umem_free(newpath, MAXPATHLEN);
}
void
ztest_device_removal(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
vdev_t *vd;
uint64_t guid;
int error;
mutex_enter(&ztest_vdev_lock);
if (ztest_device_removal_active) {
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* Remove a random top-level vdev and wait for removal to finish.
*/
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE));
guid = vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error == 0) {
ztest_device_removal_active = B_TRUE;
mutex_exit(&ztest_vdev_lock);
/*
* spa->spa_vdev_removal is created in a sync task that
* is initiated via dsl_sync_task_nowait(). Since the
* task may not run before spa_vdev_remove() returns, we
* must wait at least 1 txg to ensure that the removal
* struct has been created.
*/
txg_wait_synced(spa_get_dsl(spa), 0);
while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
} else {
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The pool needs to be scrubbed after completing device removal.
* Failure to do so may result in checksum errors due to the
* strategy employed by ztest_fault_inject() when selecting which
* offsets are redundant and can be damaged.
*/
error = spa_scan(spa, POOL_SCAN_SCRUB);
if (error == 0) {
while (dsl_scan_scrubbing(spa_get_dsl(spa)))
txg_wait_synced(spa_get_dsl(spa), 0);
}
mutex_enter(&ztest_vdev_lock);
ztest_device_removal_active = B_FALSE;
mutex_exit(&ztest_vdev_lock);
}
/*
* Callback function which expands the physical size of the vdev.
*/
static vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
spa_t *spa __maybe_unused = vd->vdev_spa;
size_t *newsize = arg;
size_t fsize;
int fd;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
return (vd);
fsize = lseek(fd, 0, SEEK_END);
VERIFY0(ftruncate(fd, *newsize));
if (ztest_opts.zo_verbose >= 6) {
(void) printf("%s grew from %lu to %lu bytes\n",
vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
}
(void) close(fd);
return (NULL);
}
/*
* Callback function which expands a given vdev by calling vdev_online().
*/
static vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
(void) arg;
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint64_t guid = vd->vdev_guid;
uint64_t generation = spa->spa_config_generation + 1;
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
int error;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/* Calling vdev_online will initialize the new metaslabs */
spa_config_exit(spa, SCL_STATE, spa);
error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If vdev_online returned an error or the underlying vdev_open
* failed then we abort the expand. The only way to know that
* vdev_open fails is by checking the returned newstate.
*/
if (error || newstate != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Unable to expand vdev, state %u, "
"error %d\n", newstate, error);
}
return (vd);
}
ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
/*
* Since we dropped the lock we need to ensure that we're
* still talking to the original vdev. It's possible this
* vdev may have been detached/replaced while we were
* trying to online it.
*/
if (generation != spa->spa_config_generation) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("vdev configuration has changed, "
"guid %"PRIu64", state %"PRIu64", "
"expected gen %"PRIu64", got gen %"PRIu64"\n",
guid,
tvd->vdev_state,
generation,
spa->spa_config_generation);
}
return (vd);
}
return (NULL);
}
/*
* Traverse the vdev tree calling the supplied function.
* We continue to walk the tree until we either have walked all
* children or we receive a non-NULL return from the callback.
* If a NULL callback is passed, then we just return the first
* leaf vdev we encounter.
*/
static vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
uint_t c;
if (vd->vdev_ops->vdev_op_leaf) {
if (func == NULL)
return (vd);
else
return (func(vd, arg));
}
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
return (cvd);
}
return (NULL);
}
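/*
* Editor's note: the walk stops at the first non-NULL callback return,
* which is why grow_vdev() and online_vdev() above return NULL on
* success and the vdev itself on failure -- a NULL result from
* vdev_walk_tree() therefore means the callback succeeded on every
* leaf.
*/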
/*
* Verify that dynamic LUN growth works as expected.
*/
void
ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
vdev_t *vd, *tvd;
metaslab_class_t *mc;
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
mutex_enter(&ztest_checkpoint_lock);
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If there is a vdev removal in progress, it could complete while
* we are running, in which case we would not be able to verify
* that the metaslab_class space increased (because it decreases
* when the device removal completes).
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
mg = tvd->vdev_mg;
mc = mg->mg_class;
old_ms_count = tvd->vdev_ms_count;
old_class_space = metaslab_class_get_space(mc);
/*
* Determine the size of the first leaf vdev associated with
* our top-level device.
*/
vd = vdev_walk_tree(tvd, NULL, NULL);
ASSERT3P(vd, !=, NULL);
ASSERT(vd->vdev_ops->vdev_op_leaf);
psize = vd->vdev_psize;
/*
* We only try to expand the vdev if it's healthy, less than 4x its
* original size, and it has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
ASSERT3U(psize, >, 0);
newsize = psize + MAX(psize / 8, SPA_MAXBLOCKSIZE);
ASSERT3U(newsize, >, psize);
if (ztest_opts.zo_verbose >= 6) {
(void) printf("Expanding LUN %s from %lu to %lu\n",
vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
}
/*
* Growing the vdev is a two-step process:
* 1) expand the physical size (i.e. relabel)
* 2) online the vdev to create the new metaslabs
*/
if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
tvd->vdev_state != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
/*
* Expanding the LUN will update the config asynchronously,
* thus we must wait for the async thread to complete any
* pending tasks before proceeding.
*/
for (;;) {
boolean_t done;
mutex_enter(&spa->spa_async_lock);
done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
mutex_exit(&spa->spa_async_lock);
if (done)
break;
txg_wait_synced(spa_get_dsl(spa), 0);
(void) poll(NULL, 0, 100);
}
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
tvd = spa->spa_root_vdev->vdev_child[top];
new_ms_count = tvd->vdev_ms_count;
new_class_space = metaslab_class_get_space(mc);
if (tvd->vdev_mg != mg || mg->mg_class != mc) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
/*
* Make sure we were able to grow the vdev.
*/
if (new_ms_count <= old_ms_count) {
fatal(B_FALSE,
"LUN expansion failed: ms_count %"PRIu64" < %"PRIu64"\n",
old_ms_count, new_ms_count);
}
/*
* Make sure we were able to grow the pool.
*/
if (new_class_space <= old_class_space) {
fatal(B_FALSE,
"LUN expansion failed: class_space %"PRIu64" < %"PRIu64"\n",
old_class_space, new_class_space);
}
if (ztest_opts.zo_verbose >= 5) {
char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ];
nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf));
nicenum(new_class_space, newnumbuf, sizeof (newnumbuf));
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
static void
ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
(void) arg, (void) cr;
/*
* Create the objects common to all ztest datasets.
*/
VERIFY0(zap_create_claim(os, ZTEST_DIROBJ,
DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx));
}
static int
ztest_dataset_create(char *dsname)
{
int err;
uint64_t rand;
dsl_crypto_params_t *dcp = NULL;
/*
* 50% of the time, we create encrypted datasets
* using a random cipher suite and a hard-coded
* wrapping key.
*/
rand = ztest_random(2);
if (rand != 0) {
nvlist_t *crypto_args = fnvlist_alloc();
nvlist_t *props = fnvlist_alloc();
/* slight bias towards the default cipher suite */
rand = ztest_random(ZIO_CRYPT_FUNCTIONS);
if (rand < ZIO_CRYPT_AES_128_CCM)
rand = ZIO_CRYPT_ON;
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), rand);
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
/*
* These parameters aren't really used by the kernel. They
* are simply stored so that userspace knows how to load
* the wrapping key.
*/
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), ZFS_KEYFORMAT_RAW);
fnvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), "prompt");
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 0ULL);
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 0ULL);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, props,
crypto_args, &dcp));
/*
* Cycle through all available encryption implementations
* to verify interoperability.
*/
VERIFY0(gcm_impl_set("cycle"));
VERIFY0(aes_impl_set("cycle"));
fnvlist_free(crypto_args);
fnvlist_free(props);
}
err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, dcp,
ztest_objset_create_cb, NULL);
dsl_crypto_params_free(dcp, !!err);
rand = ztest_random(100);
if (err || rand < 80)
return (err);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Setting dataset %s to sync always\n", dsname);
return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
ZFS_SYNC_ALWAYS, B_FALSE));
}
static int
ztest_objset_destroy_cb(const char *name, void *arg)
{
(void) arg;
objset_t *os;
dmu_object_info_t doi;
int error;
/*
* Verify that the dataset contains a directory object.
*/
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
if (error != ENOENT) {
/* We could have crashed in the middle of destroying it */
ASSERT0(error);
ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
}
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Destroy the dataset.
*/
if (strchr(name, '@') != NULL) {
error = dsl_destroy_snapshot(name, B_TRUE);
if (error != ECHRNG) {
/*
* ECHRNG means the channel program executed but hit a
* runtime error, such as insufficient slop or a hold on
* the dataset; any other outcome must be success.
*/
ASSERT0(error);
}
} else {
error = dsl_destroy_head(name);
if (error == ENOSPC) {
/* There could be checkpoint or insufficient slop */
ztest_record_enospc(FTAG);
} else if (error != EBUSY) {
/* There could be a hold on this dataset */
ASSERT0(error);
}
}
return (0);
}
static boolean_t
ztest_snapshot_create(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
(void) snprintf(snapname, sizeof (snapname), "%"PRIu64"", id);
error = dmu_objset_snapshot_one(osname, snapname);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (B_FALSE);
}
if (error != 0 && error != EEXIST && error != ECHRNG) {
fatal(B_FALSE, "ztest_snapshot_create(%s@%s) = %d", osname,
snapname, error);
}
return (B_TRUE);
}
static boolean_t
ztest_snapshot_destroy(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
(void) snprintf(snapname, sizeof (snapname), "%s@%"PRIu64"",
osname, id);
error = dsl_destroy_snapshot(snapname, B_FALSE);
if (error != 0 && error != ENOENT && error != ECHRNG)
fatal(B_FALSE, "ztest_snapshot_destroy(%s) = %d",
snapname, error);
return (B_TRUE);
}
void
ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) zd;
ztest_ds_t *zdtmp;
int iters;
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
int i;
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) snprintf(name, sizeof (name), "%s/temp_%"PRIu64"",
ztest_opts.zo_pool, id);
/*
* If this dataset exists from a previous run, process its replay log
* half of the time. If we don't replay it, then dsl_destroy_head()
* (invoked from ztest_objset_destroy_cb()) should just throw it away.
*/
if (ztest_random(2) == 0 &&
ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, FTAG, &os) == 0) {
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
dmu_objset_disown(os, B_TRUE, FTAG);
}
/*
* There may be an old instance of the dataset we're about to
* create lying around from a previous run. If so, destroy it
* and all of its snapshots.
*/
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
/*
* Verify that the destroyed dataset is no longer in the namespace.
* It may still be present if the destroy above fails with ENOSPC.
*/
error = ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, B_TRUE,
FTAG, &os);
if (error == 0) {
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_record_enospc(FTAG);
goto out;
}
VERIFY3U(ENOENT, ==, error);
/*
* Verify that we can create a new dataset.
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_objset_create(%s) = %d", name, error);
}
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, B_TRUE,
FTAG, &os));
ztest_zd_init(zdtmp, NULL, os);
/*
* Open the intent log for it.
*/
zilog = zil_open(os, ztest_get_data, NULL);
/*
* Put some objects in there, do a little I/O to them,
* and randomly take a couple of snapshots along the way.
*/
iters = ztest_random(5);
for (i = 0; i < iters; i++) {
ztest_dmu_object_alloc_free(zdtmp, id);
if (ztest_random(iters) == 0)
(void) ztest_snapshot_create(name, i);
}
/*
* Verify that we cannot create an existing dataset.
*/
VERIFY3U(EEXIST, ==,
dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL, NULL));
/*
* Verify that we can hold an objset that is also owned.
*/
VERIFY0(dmu_objset_hold(name, FTAG, &os2));
dmu_objset_rele(os2, FTAG);
/*
* Verify that we cannot own an objset that is already owned.
*/
VERIFY3U(EBUSY, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER,
B_FALSE, B_TRUE, FTAG, &os2));
zil_close(zilog);
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_zd_fini(zdtmp);
out:
(void) pthread_rwlock_unlock(&ztest_name_lock);
umem_free(zdtmp, sizeof (ztest_ds_t));
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Cleanup non-standard snapshots and clones.
*/
static void
ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
{
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"",
osname, id);
(void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"",
osname, id);
(void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"",
clone1name, id);
(void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"",
osname, id);
(void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"",
clone1name, id);
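/*
* Destroy in reverse dependency order -- clones before the snapshots
* they were cloned from: c2, then s3 and s2, then c1, then s1.
*/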
error = dsl_destroy_head(clone2name);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone2name, error);
error = dsl_destroy_snapshot(snap3name, B_FALSE);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
snap3name, error);
error = dsl_destroy_snapshot(snap2name, B_FALSE);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
snap2name, error);
error = dsl_destroy_head(clone1name);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone1name, error);
error = dsl_destroy_snapshot(snap1name, B_FALSE);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
snap1name, error);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
/*
* Verify dsl_dataset_promote handles EBUSY
*/
void
ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os;
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
char *osname = zd->zd_name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
ztest_dsl_dataset_cleanup(osname, id);
(void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"",
osname, id);
(void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"",
osname, id);
(void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"",
clone1name, id);
(void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"",
osname, id);
(void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"",
clone1name, id);
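/*
* The hierarchy built below: osname@s1 is cloned to c1, snapshots
* c1@s2 and c1@s3 are taken, and c2 is cloned from c1@s3. Owning s2
* while promoting c2 is what should produce the EBUSY this test
* expects, since the promotion would need to migrate s2 to c2.
*/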
error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_take_snapshot(%s) = %d", snap1name, error);
}
error = dmu_objset_clone(clone1name, snap1name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone1name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap2name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap3name, error);
}
error = dmu_objset_clone(clone2name, snap3name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone2name, error);
}
error = ztest_dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, B_TRUE,
FTAG, &os);
if (error)
fatal(B_FALSE, "dmu_objset_own(%s) = %d", snap2name, error);
error = dsl_dataset_promote(clone2name, NULL);
if (error == ENOSPC) {
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_record_enospc(FTAG);
goto out;
}
if (error != EBUSY)
fatal(B_FALSE, "dsl_dataset_promote(%s), %d, not EBUSY",
clone2name, error);
dmu_objset_disown(os, B_TRUE, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
(void) pthread_rwlock_unlock(&ztest_name_lock);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 4
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
void
ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
int batchsize;
int size;
int b;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
batchsize = OD_ARRAY_SIZE;
for (b = 0; b < batchsize; b++)
ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER,
0, 0, 0);
/*
* Destroy the previous batch of objects, create a new batch,
* and do some I/O on the new objects.
*/
if (ztest_object_init(zd, od, size, B_TRUE) != 0) {
zd->zd_od = NULL;
umem_free(od, size);
return;
}
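/*
* Issue a geometrically distributed number of I/Os: each pass of the
* loop below continues with probability (4 * batchsize - 1) /
* (4 * batchsize), for an average of 4 * batchsize - 1 = 15 I/Os
* against random objects in the batch.
*/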
while (ztest_random(4 * batchsize) != 0)
ztest_io(zd, od[ztest_random(batchsize)].od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
umem_free(od, size);
}
/*
* Rewind the global allocator to verify object allocation backfilling.
*/
void
ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
uint_t dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
uint64_t object;
/*
* Rewind the global allocator randomly back to a lower object number
* to force backfilling and reclamation of recently freed dnodes.
*/
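/*
* P2ALIGN(x, n) rounds x down to a multiple of n, so the allocator
* restarts at a chunk boundary; e.g. with dnodes_per_chunk = 128, a
* randomly chosen object number of 1000 rewinds the cursor to 896.
*/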
mutex_enter(&os->os_obj_lock);
object = ztest_random(os->os_obj_next_chunk);
os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk);
mutex_exit(&os->os_obj_lock);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
/*
* Verify that dmu_{read,write} work as expected.
*/
void
ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
{
int size;
ztest_od_t *od;
objset_t *os = zd->zd_os;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
dmu_tx_t *tx;
int freeit, error;
uint64_t i, n, s, txg;
bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 40;
int free_percent = 5;
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is arbitrary. It doesn't have to be a power of two,
* and it doesn't have any relation to the object blocksize.
* The only requirement is that it can hold at least two bufwads.
*
* Normally, we write the bufwad to each of these locations.
* However, free_percent of the time we instead write zeroes to
* packobj and perform a dmu_free_range() on bigobj. By comparing
* bigobj to packobj, we can verify that the DMU is correctly
* tracking which parts of an object are allocated and free,
* and that the contents of the allocated blocks are correct.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, chunksize);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
chunksize = od[0].od_gen;
ASSERT3U(chunksize, ==, od[1].od_gen);
/*
* Prefetch a random chunk of the big object.
* Our aim here is to get some async reads in flight
* for blocks that we may free below; the DMU should
* handle this race correctly.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(2 * width - 1);
dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize,
ZIO_PRIORITY_SYNC_READ);
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
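/*
* Illustrative arithmetic: with n = 2000 and s = 10, the ranges above
* cover bufwads 2000..2009, i.e. packobj bytes
* [2000 * sizeof (bufwad_t), 2010 * sizeof (bufwad_t)) and bigobj
* bytes [2000 * chunksize, 2010 * chunksize).
*/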
packbuf = umem_alloc(packsize, UMEM_NOFAIL);
bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
/*
* free_percent of the time, free a range of bigobj rather than
* overwriting it.
*/
freeit = (ztest_random(100) < free_percent);
/*
* Read the current contents of our objects.
*/
error = dmu_read(os, packobj, packoff, packsize, packbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
if (freeit)
dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
else
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
/* This accounts for setting the checksum/compression. */
dmu_tx_hold_bonus(tx, bigobj);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
return;
}
enum zio_checksum cksum;
do {
cksum = (enum zio_checksum)
ztest_random_dsl_prop(ZFS_PROP_CHECKSUM);
} while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS);
dmu_object_set_checksum(os, bigobj, cksum, tx);
enum zio_compress comp;
do {
comp = (enum zio_compress)
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION);
} while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS);
dmu_object_set_compress(os, bigobj, comp, tx);
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize);
ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize);
if (pack->bw_txg > txg)
fatal(B_FALSE,
"future leak: got %"PRIx64", open txg is %"PRIx64"",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
fatal(B_FALSE, "wrong index: "
"got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
pack, bigH);
if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
pack, bigT);
if (freeit) {
memset(pack, 0, sizeof (bufwad_t));
} else {
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
}
*bigH = *pack;
*bigT = *pack;
}
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (freeit) {
if (ztest_opts.zo_verbose >= 7) {
(void) printf("freeing offset %"PRIx64" size %"PRIx64""
" txg %"PRIx64"\n",
bigoff, bigsize, txg);
}
VERIFY0(dmu_free_range(os, bigobj, bigoff, bigsize, tx));
} else {
if (ztest_opts.zo_verbose >= 7) {
(void) printf("writing offset %"PRIx64" size %"PRIx64""
" txg %"PRIx64"\n",
bigoff, bigsize, txg);
}
dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(memcmp(packbuf, packcheck, packsize));
ASSERT0(memcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
}
static void
compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
{
uint64_t i;
bufwad_t *pack;
bufwad_t *bigH;
bufwad_t *bigT;
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize);
ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize);
if (pack->bw_txg > txg)
fatal(B_FALSE,
"future leak: got %"PRIx64", open txg is %"PRIx64"",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
fatal(B_FALSE, "wrong index: "
"got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
pack, bigH);
if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
pack, bigT);
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
*bigH = *pack;
*bigT = *pack;
}
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
void
ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
uint64_t i;
int error;
int size;
uint64_t n, s, txg;
bufwad_t *packbuf, *bigbuf;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t blocksize = ztest_random_blocksize();
uint64_t chunksize = blocksize;
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 9;
dmu_buf_t *bonus_db;
arc_buf_t **bigbuf_arcbufs;
dmu_object_info_t doi;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is set equal to bigobj block size so that
* dmu_assign_arcbuf_by_dbuf() can be tested for object updates.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
blocksize = od[0].od_blocksize;
chunksize = blocksize;
ASSERT3U(chunksize, ==, od[1].od_gen);
VERIFY0(dmu_object_info(os, bigobj, &doi));
VERIFY(ISP2(doi.doi_data_block_size));
VERIFY3U(chunksize, ==, doi.doi_data_block_size);
VERIFY3U(chunksize, >=, 2 * sizeof (bufwad_t));
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
/*
* Iteration 0 tests zcopy for DB_UNCACHED dbufs.
* Iteration 1 tests zcopy to already-referenced dbufs.
* Iteration 2 tests zcopy to a dbuf dirtied in the same txg.
* Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
* Iteration 4 tests zcopy when the dbuf is no longer dirty.
* Iteration 5 tests zcopy when it can't be done.
* Iteration 6 performs one more zcopy write.
*/
for (i = 0; i < 7; i++) {
uint64_t j;
uint64_t off;
/*
* In iteration 5 (i == 5) use arcbufs
* that don't match bigobj blksz to test
* dmu_assign_arcbuf_by_dbuf() when it can't directly
* assign an arcbuf to a dbuf.
*/
for (j = 0; j < s; j++) {
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bigbuf_arcbufs[j] =
dmu_request_arcbuf(bonus_db, chunksize);
} else {
bigbuf_arcbufs[2 * j] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
bigbuf_arcbufs[2 * j + 1] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
}
}
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
for (j = 0; j < s; j++) {
if (i != 5 ||
chunksize < (SPA_MINBLOCKSIZE * 2)) {
dmu_return_arcbuf(bigbuf_arcbufs[j]);
} else {
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j]);
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j + 1]);
}
}
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
dmu_buf_rele(bonus_db, FTAG);
return;
}
/*
* 50% of the time don't read objects in the 1st iteration to
* test dmu_assign_arcbuf_by_dbuf() for the case when there are
* no existing dbufs for the specified offsets.
*/
if (i != 0 || ztest_random(2) != 0) {
error = dmu_read(os, packobj, packoff,
packsize, packbuf, DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize,
bigbuf, DMU_READ_PREFETCH);
ASSERT0(error);
}
compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
n, chunksize, txg);
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (ztest_opts.zo_verbose >= 7) {
(void) printf("writing offset %"PRIx64" size %"PRIx64""
" txg %"PRIx64"\n",
bigoff, bigsize, txg);
}
for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
dmu_buf_t *dbt;
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
memcpy(bigbuf_arcbufs[j]->b_data,
(caddr_t)bigbuf + (off - bigoff),
chunksize);
} else {
memcpy(bigbuf_arcbufs[2 * j]->b_data,
(caddr_t)bigbuf + (off - bigoff),
chunksize / 2);
memcpy(bigbuf_arcbufs[2 * j + 1]->b_data,
(caddr_t)bigbuf + (off - bigoff) +
chunksize / 2,
chunksize / 2);
}
if (i == 1) {
VERIFY(dmu_buf_hold(os, bigobj, off,
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[j], tx));
} else {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[2 * j], tx));
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2,
bigbuf_arcbufs[2 * j + 1], tx));
}
if (i == 1) {
dmu_buf_rele(dbt, FTAG);
}
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(memcmp(packbuf, packcheck, packsize));
ASSERT0(memcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
if (i == 2) {
txg_wait_open(dmu_objset_pool(os), 0, B_TRUE);
} else if (i == 3) {
txg_wait_synced(dmu_objset_pool(os), 0);
}
}
dmu_buf_rele(bonus_db, FTAG);
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
}
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
(void) id;
ztest_od_t *od;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
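/*
* The base offset is 1 << (43..62), i.e. 8 TiB to 4 EiB into the
* sparse object, plus a random multiple of SPA_MAXBLOCKSIZE drawn
* from the range-lock space.
*/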
/*
* Have multiple threads write to large offsets in an object
* to verify that parallel writes to an object -- even to the
* same blocks within the object -- don't cause any trouble.
*/
ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
return;
while (ztest_random(10) != 0)
ztest_io(zd, od->od_object, offset);
umem_free(od, sizeof (ztest_od_t));
}
void
ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
uint64_t count = ztest_random(20) + 1;
uint64_t blocksize = ztest_random_blocksize();
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
ztest_prealloc(zd, od->od_object, offset, count * blocksize);
data = umem_zalloc(blocksize, UMEM_NOFAIL);
while (ztest_random(count) != 0) {
uint64_t randoff = offset + (ztest_random(count) * blocksize);
if (ztest_write(zd, od->od_object, randoff, blocksize,
data) != 0)
break;
while (ztest_random(4) != 0)
ztest_io(zd, od->od_object, randoff);
}
umem_free(data, blocksize);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Verify that zap_{create,destroy,add,remove,update} work as expected.
*/
#define ZTEST_ZAP_MIN_INTS 1
#define ZTEST_ZAP_MAX_INTS 4
#define ZTEST_ZAP_MAX_PROPS 1000
void
ztest_zap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object;
uint64_t txg, last_txg;
uint64_t value[ZTEST_ZAP_MAX_INTS];
uint64_t zl_ints, zl_intsize, prop;
int i, ints;
dmu_tx_t *tx;
char propname[100], txgname[100];
int error;
const char *const hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Generate a known hash collision, and verify that
* we can lookup and remove both entries.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
for (i = 0; i < 2; i++) {
value[i] = i;
VERIFY0(zap_add(os, object, hc[i], sizeof (uint64_t),
1, &value[i], tx));
}
for (i = 0; i < 2; i++) {
VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
sizeof (uint64_t), 1, &value[i], tx));
VERIFY0(
zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
}
for (i = 0; i < 2; i++) {
VERIFY0(zap_remove(os, object, hc[i], tx));
}
dmu_tx_commit(tx);
/*
* Generate a bunch of random entries.
*/
ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%"PRIu64"", prop);
(void) sprintf(txgname, "txg_%"PRIu64"", prop);
memset(value, 0, sizeof (value));
last_txg = 0;
/*
* If these zap entries already exist, validate their contents.
*/
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == 0) {
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
VERIFY0(zap_lookup(os, object, txgname, zl_intsize,
zl_ints, &last_txg));
VERIFY0(zap_length(os, object, propname, &zl_intsize,
&zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, ints);
VERIFY0(zap_lookup(os, object, propname, zl_intsize,
zl_ints, value));
for (i = 0; i < ints; i++) {
ASSERT3U(value[i], ==, last_txg + object + i);
}
} else {
ASSERT3U(error, ==, ENOENT);
}
/*
* Atomically update two entries in our zap object.
* The first is named txg_%llu, and contains the txg
* in which the property was last updated. The second
* is named prop_%llu, and the nth element of its value
* should be txg + object + n.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
if (last_txg > txg)
fatal(B_FALSE, "zap future leak: old %"PRIu64" new %"PRIu64"",
last_txg, txg);
for (i = 0; i < ints; i++)
value[i] = txg + object + i;
VERIFY0(zap_update(os, object, txgname, sizeof (uint64_t),
1, &txg, tx));
VERIFY0(zap_update(os, object, propname, sizeof (uint64_t),
ints, value, tx));
dmu_tx_commit(tx);
/*
* Remove a random pair of entries.
*/
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%"PRIu64"", prop);
(void) sprintf(txgname, "txg_%"PRIu64"", prop);
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == ENOENT)
goto out;
ASSERT0(error);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
VERIFY0(zap_remove(os, object, txgname, tx));
VERIFY0(zap_remove(os, object, propname, tx));
dmu_tx_commit(tx);
out:
umem_free(od, sizeof (ztest_od_t));
}
/*
* Verify that a microzap is upgraded to a fatzap as it grows.
*/
void
ztest_fzap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object, txg, value;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Add entries to this ZAP and make sure it spills over
* and gets upgraded to a fatzap. Also, since we are adding
* 2050 entries we should see ptrtbl growth and leaf-block splits.
*/
for (value = 0; value < 2050; value++) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dmu_tx_t *tx;
int error;
(void) snprintf(name, sizeof (name), "fzap-%"PRIu64"-%"PRIu64"",
id, value);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1,
&value, tx);
ASSERT(error == 0 || error == EEXIST);
dmu_tx_commit(tx);
}
out:
umem_free(od, sizeof (ztest_od_t));
}
void
ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
dmu_tx_t *tx;
int i, namelen, error;
int micro = ztest_random(2);
char name[20], string_value[20];
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
object = od->od_object;
/*
* Generate a random name of the form 'xxx.....' where each
* x is a random printable character and the dots are dots.
* There are 94 such characters, and the name length goes from
* 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
*/
namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
for (i = 0; i < 3; i++)
name[i] = '!' + ztest_random('~' - '!' + 1);
for (; i < namelen - 1; i++)
name[i] = '.';
name[i] = '\0';
if ((namelen & 1) || micro) {
wsize = sizeof (txg);
wc = 1;
data = &txg;
} else {
wsize = 1;
wc = namelen;
data = string_value;
}
count = -1ULL;
VERIFY0(zap_count(os, object, &count));
ASSERT3S(count, !=, -1ULL);
/*
* Select an operation: length, lookup, add, update, remove.
*/
i = ztest_random(5);
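/*
* Operations 0 and 1 (length, lookup) are read-only; operations 2-4
* (add, update, remove) mutate the ZAP and therefore get an assigned
* tx below.
*/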
if (i >= 2) {
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
memcpy(string_value, name, namelen);
} else {
tx = NULL;
txg = 0;
memset(string_value, 0, namelen);
}
switch (i) {
case 0:
error = zap_length(os, object, name, &zl_wsize, &zl_wc);
if (error == 0) {
ASSERT3U(wsize, ==, zl_wsize);
ASSERT3U(wc, ==, zl_wc);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 1:
error = zap_lookup(os, object, name, wsize, wc, data);
if (error == 0) {
if (data == string_value &&
memcmp(name, data, namelen) != 0)
fatal(B_FALSE, "name '%s' != val '%s' len %d",
name, (char *)data, namelen);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 2:
error = zap_add(os, object, name, wsize, wc, data, tx);
ASSERT(error == 0 || error == EEXIST);
break;
case 3:
VERIFY0(zap_update(os, object, name, wsize, wc, data, tx));
break;
case 4:
error = zap_remove(os, object, name, tx);
ASSERT(error == 0 || error == ENOENT);
break;
}
if (tx != NULL)
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Commit callback data.
*/
typedef struct ztest_cb_data {
list_node_t zcd_node;
uint64_t zcd_txg;
int zcd_expected_err;
boolean_t zcd_added;
boolean_t zcd_called;
spa_t *zcd_spa;
} ztest_cb_data_t;
/* This is the actual commit callback function */
static void
ztest_commit_callback(void *arg, int error)
{
ztest_cb_data_t *data = arg;
uint64_t synced_txg;
VERIFY3P(data, !=, NULL);
VERIFY3S(data->zcd_expected_err, ==, error);
VERIFY(!data->zcd_called);
synced_txg = spa_last_synced_txg(data->zcd_spa);
if (data->zcd_txg > synced_txg)
fatal(B_FALSE,
"commit callback of txg %"PRIu64" called prematurely, "
"last synced txg = %"PRIu64"\n",
data->zcd_txg, synced_txg);
data->zcd_called = B_TRUE;
if (error == ECANCELED) {
ASSERT0(data->zcd_txg);
ASSERT(!data->zcd_added);
/*
* The private callback data should be destroyed here, but
* since we are going to check the zcd_called field after
* dmu_tx_abort(), we will destroy it there.
*/
return;
}
ASSERT(data->zcd_added);
ASSERT3U(data->zcd_txg, !=, 0);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/* Track the smallest observed txg delay between commit and callback */
if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
zc_min_txg_delay = synced_txg - data->zcd_txg;
/* Remove our callback from the list */
list_remove(&zcl.zcl_callbacks, data);
(void) mutex_exit(&zcl.zcl_callbacks_lock);
umem_free(data, sizeof (ztest_cb_data_t));
}
/* Allocate and initialize callback data structure */
static ztest_cb_data_t *
ztest_create_cb_data(objset_t *os, uint64_t txg)
{
ztest_cb_data_t *cb_data;
cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
cb_data->zcd_txg = txg;
cb_data->zcd_spa = dmu_objset_spa(os);
list_link_init(&cb_data->zcd_node);
return (cb_data);
}
/*
* Commit callback test.
*/
void
ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
ztest_cb_data_t *cb_data[3], *tmp_cb;
uint64_t old_txg, txg;
int i, error = 0;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
tx = dmu_tx_create(os);
cb_data[0] = ztest_create_cb_data(os, 0);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
/* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
error = -1;
if (!error)
error = dmu_tx_assign(tx, TXG_NOWAIT);
txg = error ? 0 : dmu_tx_get_txg(tx);
cb_data[0]->zcd_txg = txg;
cb_data[1] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
if (error) {
/*
* It's not a strict requirement to call the registered
* callbacks from inside dmu_tx_abort(), but that's what
* happens in the current implementation, so we will
* check for that.
*/
for (i = 0; i < 2; i++) {
cb_data[i]->zcd_expected_err = ECANCELED;
VERIFY(!cb_data[i]->zcd_called);
}
dmu_tx_abort(tx);
for (i = 0; i < 2; i++) {
VERIFY(cb_data[i]->zcd_called);
umem_free(cb_data[i], sizeof (ztest_cb_data_t));
}
umem_free(od, sizeof (ztest_od_t));
return;
}
cb_data[2] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
/*
* Read existing data to make sure there isn't a future leak.
*/
VERIFY0(dmu_read(os, od->od_object, 0, sizeof (uint64_t),
&old_txg, DMU_READ_PREFETCH));
if (old_txg > txg)
fatal(B_FALSE,
"future leak: got %"PRIu64", open txg is %"PRIu64"",
old_txg, txg);
dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
* synced, it is difficult to reliably determine whether a commit
* callback hasn't been called due to high load or due to a flawed
* implementation.
*
* In practice, we assume that if a commit callback hasn't been called
* after a certain number of txgs, then most likely there's an
* implementation bug.
*/
tmp_cb = list_head(&zcl.zcl_callbacks);
if (tmp_cb != NULL &&
tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
fatal(B_FALSE,
"Commit callback threshold exceeded, "
"oldest txg: %"PRIu64", open txg: %"PRIu64"\n",
tmp_cb->zcd_txg, txg);
}
/*
* Let's find the place to insert our callbacks.
*
* Even though the list is ordered by txg, it is possible for the
* insertion point to not be the end because our txg may already be
* quiescing at this point and other callbacks in the open txg
* (from other objsets) may have sneaked in.
*/
tmp_cb = list_tail(&zcl.zcl_callbacks);
while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
/* Add the 3 callbacks to the list */
for (i = 0; i < 3; i++) {
if (tmp_cb == NULL)
list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
else
list_insert_after(&zcl.zcl_callbacks, tmp_cb,
cb_data[i]);
cb_data[i]->zcd_added = B_TRUE;
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
zc_cb_counter += 3;
(void) mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Visit each object in the dataset. Verify that its properties
* are consistent with what was stored in the block tag when it was created,
* and that its unused bonus buffer space has not been overwritten.
*/
void
ztest_verify_dnode_bt(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
uint64_t obj;
int err = 0;
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
ztest_block_tag_t *bt = NULL;
dmu_object_info_t doi;
dmu_buf_t *db;
ztest_object_lock(zd, obj, RL_READER);
if (dmu_bonus_hold(os, obj, FTAG, &db) != 0) {
ztest_object_unlock(zd, obj);
continue;
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_size >= sizeof (*bt))
bt = ztest_bt_bonus(db);
if (bt && bt->bt_magic == BT_MAGIC) {
ztest_bt_verify(bt, os, obj, doi.doi_dnodesize,
bt->bt_offset, bt->bt_gen, bt->bt_txg,
bt->bt_crtxg);
ztest_verify_unused_bonus(db, bt, obj, os, bt->bt_gen);
}
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, obj);
}
}
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
(void) id;
zfs_prop_t proplist[] = {
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
(void) pthread_rwlock_rdlock(&ztest_name_lock);
for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) {
int error = ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
ASSERT(error == 0 || error == ENOSPC);
}
int error = ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_RECORDSIZE,
ztest_random_blocksize(), (int)ztest_random(2));
ASSERT(error == 0 || error == ENOSPC);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
nvlist_t *props = NULL;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_AUTOTRIM, ztest_random(2));
VERIFY0(spa_prop_get(ztest_spa, &props));
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
fnvlist_free(props);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
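/*
* Release a single user hold by building the nested nvlist shape that
* dsl_dataset_user_release() consumes: { snapname -> { holdname } }.
*/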
static int
user_release_one(const char *snapname, const char *holdname)
{
nvlist_t *snaps, *holds;
int error;
snaps = fnvlist_alloc();
holds = fnvlist_alloc();
fnvlist_add_boolean(holds, holdname);
fnvlist_add_nvlist(snaps, snapname, holds);
fnvlist_free(holds);
error = dsl_dataset_user_release(snaps, NULL);
fnvlist_free(snaps);
return (error);
}
/*
* Test snapshot hold/release and deferred destroy.
*/
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
int error;
objset_t *os = zd->zd_os;
objset_t *origin;
char snapname[100];
char fullname[100];
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
dmu_objset_name(os, osname);
(void) snprintf(snapname, sizeof (snapname), "sh1_%"PRIu64"", id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
(void) snprintf(clonename, sizeof (clonename), "%s/ch1_%"PRIu64"",
osname, id);
(void) snprintf(tag, sizeof (tag), "tag_%"PRIu64"", id);
/*
* Clean up from any previous run.
*/
error = dsl_destroy_head(clonename);
if (error != ENOENT)
ASSERT0(error);
error = user_release_one(fullname, tag);
if (error != ESRCH && error != ENOENT)
ASSERT0(error);
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != ENOENT)
ASSERT0(error);
/*
* Create snapshot, clone it, mark snap for deferred destroy,
* destroy clone, verify snap was also destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
error = dmu_objset_clone(clonename, fullname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_clone");
goto out;
}
fatal(B_FALSE, "dmu_objset_clone(%s) = %d", clonename, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = dsl_destroy_head(clonename);
if (error)
fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clonename, error);
error = dmu_objset_hold(fullname, FTAG, &origin);
if (error != ENOENT)
fatal(B_FALSE, "dmu_objset_hold(%s) = %d", fullname, error);
/*
* Create snapshot, add temporary hold, verify that we can't
* destroy a held snapshot, mark for deferred destroy,
* release hold, verify snapshot was destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
holds = fnvlist_alloc();
fnvlist_add_string(holds, fullname, tag);
error = dsl_dataset_user_hold(holds, 0, NULL);
fnvlist_free(holds);
if (error == ENOSPC) {
ztest_record_enospc("dsl_dataset_user_hold");
goto out;
} else if (error) {
fatal(B_FALSE, "dsl_dataset_user_hold(%s, %s) = %u",
fullname, tag, error);
}
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != EBUSY) {
fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
fullname, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = user_release_one(fullname, tag);
if (error)
fatal(B_FALSE, "user_release_one(%s, %s) = %d",
fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
int fd;
uint64_t offset;
uint64_t leaves;
uint64_t bad = 0x1990c0ffeedecadeull;
uint64_t top, leaf;
char *path0;
char *pathrand;
size_t fsize;
int bshift = SPA_MAXBLOCKSHIFT + 2;
int iters = 1000;
int maxfaults;
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
/*
* While device removal is in progress, fault injection must be
* disabled until it completes and the pool is scrubbed. The fault
* injection strategy for damaging blocks does not take into account
* evacuated blocks which may have already been damaged.
*/
if (ztest_device_removal_active) {
mutex_exit(&ztest_vdev_lock);
goto out;
}
maxfaults = MAXFAULTS(zs);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
mirror_save = zs->zs_mirrors;
mutex_exit(&ztest_vdev_lock);
ASSERT3U(leaves, >=, 1);
/*
* While ztest is running the number of leaves will not change. This
* is critical for the fault injection logic as it determines where
* errors can be safely injected such that they are always repairable.
*
* When restarting ztest a different number of leaves may be requested
* which will shift the regions to be damaged. This is fine as long
* as the pool has been scrubbed prior to using the new mapping.
* Failure to do so can result in non-repairable damage being injected.
*/
if (ztest_pool_scrubbed == B_FALSE)
goto out;
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (e.g. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
(void) pthread_rwlock_rdlock(&ztest_name_lock);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (ztest_random(2) == 0) {
/*
* Inject errors on a normal data device or slog device.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
leaf = ztest_random(leaves) + zs->zs_splits;
/*
* Generate paths to the first leaf in this top-level vdev,
* and to the random leaf we selected. We'll induce transient
* write failures and random online/offline activity on leaf 0,
* and we'll write random garbage to the randomly chosen leaf.
*/
(void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + zs->zs_splits);
(void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
if (vd0 != NULL && vd0->vdev_top->vdev_islog)
islog = B_TRUE;
/*
* If the top-level vdev needs to be resilvered
* then we only allow faults on the device that is
* resilvering.
*/
if (vd0 != NULL && maxfaults != 1 &&
(!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
vd0->vdev_resilver_txg != 0)) {
/*
* Make vd0 explicitly claim to be unreadable,
* or unwritable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
* and we can do it if maxfaults >= 2 because we'll
* have enough redundancy. If maxfaults == 1, the
* combination of this with injection of random data
* corruption below exceeds the pool's fault tolerance.
*/
vdev_file_t *vf = vd0->vdev_tsd;
zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d",
(long long)vd0->vdev_id, (int)maxfaults);
if (vf != NULL && ztest_random(3) == 0) {
(void) close(vf->vf_file->f_fd);
vf->vf_file->f_fd = -1;
} else if (ztest_random(2) == 0) {
vd0->vdev_cant_read = B_TRUE;
} else {
vd0->vdev_cant_write = B_TRUE;
}
guid0 = vd0->vdev_guid;
}
} else {
/*
* Inject errors on an l2cache device.
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
goto out;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strlcpy(path0, vd0->vdev_path, MAXPATHLEN);
(void) strlcpy(pathrand, vd0->vdev_path, MAXPATHLEN);
leaf = 0;
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
if ((maxfaults >= 2 || islog) && guid0 != 0) {
if (ztest_random(10) < 6) {
int flags = (ztest_random(2) == 0 ?
ZFS_OFFLINE_TEMPORARY : 0);
/*
* We have to grab the ztest_name_lock as writer to
* prevent a race between offlining a slog and
* destroying a dataset. Offlining the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
(void) pthread_rwlock_wrlock(&ztest_name_lock);
VERIFY3U(vdev_offline(spa, guid0, flags), !=, EBUSY);
if (islog)
(void) pthread_rwlock_unlock(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
mutex_enter(&ztest_vdev_lock);
(void) vdev_online(spa, guid0, 0, NULL);
mutex_exit(&ztest_vdev_lock);
}
}
if (maxfaults == 0)
goto out;
/*
* We have at least single-fault tolerance, so inject data corruption.
*/
fd = open(pathrand, O_RDWR);
if (fd == -1) /* we hit a gap in the device namespace */
goto out;
fsize = lseek(fd, 0, SEEK_END);
while (--iters != 0) {
/*
* The offset must be chosen carefully to ensure that
* we do not inject a given logical block with errors
* on two different leaf devices, because ZFS cannot
* tolerate that (if maxfaults==1).
*
* To achieve this we divide each leaf device into
* chunks of size (# leaves * SPA_MAXBLOCKSIZE * 4).
* Each chunk is further divided into error-injection
* ranges (can accept errors) and clear ranges (we do
* not inject errors in those). Each error-injection
* range can accept errors only for a single leaf vdev.
* Error-injection ranges are separated by clear ranges.
*
* For example, with 3 leaves, each chunk looks like:
* 0 to 32M: injection range for leaf 0
* 32M to 64M: clear range - no injection allowed
* 64M to 96M: injection range for leaf 1
* 96M to 128M: clear range - no injection allowed
* 128M to 160M: injection range for leaf 2
* 160M to 192M: clear range - no injection allowed
*
* Each clear range must be large enough such that a
* single block cannot straddle it. This way a block
* can't be a target in two different injection ranges
* (on different leaf vdevs).
*/
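/*
* Decoding the expression below (bshift = SPA_MAXBLOCKSHIFT + 2):
* the first term picks a random chunk of size (leaves << bshift),
* the second term selects this leaf's slice within the chunk, and
* the third picks an 8-byte-aligned offset within the first half of
* that slice -- the second half is the clear range.
*/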
offset = ztest_random(fsize / (leaves << bshift)) *
(leaves << bshift) + (leaf << bshift) +
(ztest_random(1ULL << (bshift - 1)) & -8ULL);
/*
* Only allow damage to the labels at one end of the vdev.
*
* If all labels are damaged, the device will be totally
* inaccessible, which will result in loss of data,
* because we also damage (parts of) the other side of
* the mirror/raidz.
*
* Additionally, we will always have both an even and an
* odd label, so that we can handle crashes in the
* middle of vdev_config_sync().
*/
if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE)
continue;
/*
* The two end labels are stored at the "end" of the disk, but
* the end of the disk (vdev_psize) is aligned to
* sizeof (vdev_label_t).
*/
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
mutex_enter(&ztest_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
mutex_exit(&ztest_vdev_lock);
(void) close(fd);
goto out;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(B_TRUE,
"can't inject bad word at 0x%"PRIx64" in %s",
offset, pathrand);
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
" offset 0x%"PRIx64"\n", pathrand, offset);
}
(void) close(fd);
out:
umem_free(path0, MAXPATHLEN);
umem_free(pathrand, MAXPATHLEN);
}
/*
* By design ztest will never inject uncorrectable damage into the pool.
* Issue a scrub, wait for it to complete, and verify there is never any
* persistent damage.
*
* Only after a full scrub has been completed is it safe to start injecting
* data corruption. See the comment in ztest_fault_inject().
*/
static int
ztest_scrub_impl(spa_t *spa)
{
int error = spa_scan(spa, POOL_SCAN_SCRUB);
if (error)
return (error);
while (dsl_scan_scrubbing(spa_get_dsl(spa)))
txg_wait_synced(spa_get_dsl(spa), 0);
if (spa_approx_errlog_size(spa) > 0)
return (ECKSUM);
ztest_pool_scrubbed = B_TRUE;
return (0);
}
/*
* Scrub the pool.
*/
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
int error;
/*
* Skip if device removal already has a scrub in progress.
*/
if (ztest_device_removal_active)
return;
/*
* Start a scrub, wait a moment, then force a restart.
*/
(void) spa_scan(spa, POOL_SCAN_SCRUB);
(void) poll(NULL, 0, 100);
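/* poll() with no fds is a portable ~100 ms sleep before the restart. */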
error = ztest_scrub_impl(spa);
if (error == EBUSY)
error = 0;
ASSERT0(error);
}
/*
* Change the guid for the pool.
*/
void
ztest_reguid(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
uint64_t orig, load;
int error;
if (ztest_opts.zo_mmp_test)
return;
orig = spa_guid(spa);
load = spa_load_guid(spa);
(void) pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_change_guid(spa);
(void) pthread_rwlock_unlock(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Changed guid old %"PRIu64" -> %"PRIu64"\n",
orig, spa_guid(spa));
}
VERIFY3U(orig, !=, spa_guid(spa));
VERIFY3U(load, ==, spa_load_guid(spa));
}
void
ztest_blake3(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
hrtime_t end = gethrtime() + NANOSEC;
zio_cksum_salt_t salt;
void *salt_ptr = &salt.zcs_bytes;
struct abd *abd_data, *abd_meta;
void *buf, *templ;
int i, *ptr;
uint32_t size;
BLAKE3_CTX ctx;
const zfs_impl_t *blake3 = zfs_impl_get_ops("blake3");
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
abd_data = abd_alloc(size, B_FALSE);
abd_meta = abd_alloc(size, B_TRUE);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
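/*
 * Use a fixed 32-byte key (BLAKE3_KEY_LEN) so the reference and
 * ABD results below are computed over identical keyed state.
 */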
memset(salt_ptr, 'A', 32);
abd_copy_from_buf_off(abd_data, buf, 0, size);
abd_copy_from_buf_off(abd_meta, buf, 0, size);
while (gethrtime() <= end) {
int run_count = 100;
zio_cksum_t zc_ref1, zc_ref2;
zio_cksum_t zc_res1, zc_res2;
void *ref1 = &zc_ref1;
void *ref2 = &zc_ref2;
void *res1 = &zc_res1;
void *res2 = &zc_res2;
/* BLAKE3_KEY_LEN = 32 */
VERIFY0(blake3->setname("generic"));
templ = abd_checksum_blake3_tmpl_init(&salt);
Blake3_InitKeyed(&ctx, salt_ptr);
Blake3_Update(&ctx, buf, size);
Blake3_Final(&ctx, ref1);
zc_ref2 = zc_ref1;
ZIO_CHECKSUM_BSWAP(&zc_ref2);
abd_checksum_blake3_tmpl_free(templ);
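/*
 * "cycle" rotates through all compiled-in BLAKE3 implementations,
 * so each pass below may use a different SIMD variant; every one
 * must match the generic reference computed above.
 */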
VERIFY0(blake3->setname("cycle"));
while (run_count-- > 0) {
/* Test current implementation */
Blake3_InitKeyed(&ctx, salt_ptr);
Blake3_Update(&ctx, buf, size);
Blake3_Final(&ctx, res1);
zc_res2 = zc_res1;
ZIO_CHECKSUM_BSWAP(&zc_res2);
VERIFY0(memcmp(ref1, res1, 32));
VERIFY0(memcmp(ref2, res2, 32));
/* Test ABD - data */
templ = abd_checksum_blake3_tmpl_init(&salt);
abd_checksum_blake3_native(abd_data, size,
templ, &zc_res1);
abd_checksum_blake3_byteswap(abd_data, size,
templ, &zc_res2);
VERIFY0(memcmp(ref1, res1, 32));
VERIFY0(memcmp(ref2, res2, 32));
/* Test ABD - metadata */
abd_checksum_blake3_native(abd_meta, size,
templ, &zc_res1);
abd_checksum_blake3_byteswap(abd_meta, size,
templ, &zc_res2);
abd_checksum_blake3_tmpl_free(templ);
VERIFY0(memcmp(ref1, res1, 32));
VERIFY0(memcmp(ref2, res2, 32));
}
}
abd_free(abd_data);
abd_free(abd_meta);
umem_free(buf, size);
}
void
ztest_fletcher(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
void *buf;
struct abd *abd_data, *abd_meta;
uint32_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_byteswap;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
abd_data = abd_alloc(size, B_FALSE);
abd_meta = abd_alloc(size, B_TRUE);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
abd_copy_from_buf_off(abd_data, buf, 0, size);
abd_copy_from_buf_off(abd_meta, buf, 0, size);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_byteswap);
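/*
 * "cycle" switches fletcher4 implementations between calls, so
 * each iteration below may use a different SIMD variant; all of
 * them must reproduce the scalar reference.
 */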
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_byteswap;
fletcher_4_byteswap(buf, size, NULL, &zc_byteswap);
fletcher_4_native(buf, size, NULL, &zc);
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - data */
abd_fletcher_4_byteswap(abd_data, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_data, size, NULL, &zc);
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - metadata */
abd_fletcher_4_byteswap(abd_meta, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_meta, size, NULL, &zc);
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
}
umem_free(buf, size);
abd_free(abd_data);
abd_free(abd_meta);
}
}
void
ztest_fletcher_incr(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
void *buf;
size_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_bswap;
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_bswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_bswap;
size_t pos = 0;
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
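/*
 * The incremental API accumulates into a zeroed checksum; feed
 * the buffer in randomly sized steps and compare the result with
 * the one-shot reference.
 */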
while (pos < size) {
size_t inc = 64 * ztest_random(size / 67);
/* sometimes add a few bytes to exercise the non-SIMD path */
if (ztest_random(100) < 10)
inc += P2ALIGN(ztest_random(64),
sizeof (uint32_t));
if (inc > (size - pos))
inc = size - pos;
fletcher_4_incremental_native(buf + pos, inc,
&zc);
fletcher_4_incremental_byteswap(buf + pos, inc,
&zc_bswap);
pos += inc;
}
VERIFY3U(pos, ==, size);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
/*
* Verify that a single incremental pass over the whole
* buffer is equivalent to the non-incremental version.
*/
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
fletcher_4_incremental_native(buf, size, &zc);
fletcher_4_incremental_byteswap(buf, size, &zc_bswap);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
}
umem_free(buf, size);
}
}
static int
ztest_set_global_vars(void)
{
for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
char *kv = ztest_opts.zo_gvars[i];
VERIFY3U(strlen(kv), <=, ZO_GVARS_MAX_ARGLEN);
VERIFY3U(strlen(kv), >, 0);
int err = set_global_var(kv);
if (ztest_opts.zo_verbose > 0) {
(void) printf("setting global var %s ... %s\n", kv,
err ? "failed" : "ok");
}
if (err != 0) {
(void) fprintf(stderr,
"failed to set global var '%s'\n", kv);
return (err);
}
}
return (0);
}
static char **
ztest_global_vars_to_zdb_args(void)
{
char **args = calloc(2*ztest_opts.zo_gvars_count + 1, sizeof (char *));
char **cur = args;
if (args == NULL)
return (NULL);
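/* Each global var becomes a "-o name=value" argument pair for zdb. */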
for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
*cur++ = (char *)"-o";
*cur++ = ztest_opts.zo_gvars[i];
}
ASSERT3P(cur, ==, &args[2*ztest_opts.zo_gvars_count]);
*cur = NULL;
return (args);
}
/* The end of strings is indicated by a NULL element */
static char *
join_strings(char **strings, const char *sep)
{
size_t totallen = 0;
for (char **sp = strings; *sp != NULL; sp++) {
totallen += strlen(*sp);
totallen += strlen(sep);
}
if (totallen > 0) {
ASSERT(totallen >= strlen(sep));
totallen -= strlen(sep);
}
size_t buflen = totallen + 1;
char *o = umem_alloc(buflen, UMEM_NOFAIL); /* trailing 0 byte */
o[0] = '\0';
for (char **sp = strings; *sp != NULL; sp++) {
size_t would;
would = strlcat(o, *sp, buflen);
VERIFY3U(would, <, buflen);
if (*(sp+1) == NULL) {
break;
}
would = strlcat(o, sep, buflen);
VERIFY3U(would, <, buflen);
}
ASSERT3S(strlen(o), ==, totallen);
return (o);
}
static int
ztest_check_path(char *path)
{
struct stat s;
/* return true on success */
return (!stat(path, &s));
}
static void
ztest_get_zdb_bin(char *bin, int len)
{
char *zdb_path;
/*
* Try $ZDB first, then the in-tree zdb path. If neither works,
* just let popen() search through PATH.
*/
if ((zdb_path = getenv("ZDB"))) {
strlcpy(bin, zdb_path, len); /* In env */
if (!ztest_check_path(bin)) {
ztest_dump_core = 0;
fatal(B_TRUE, "invalid ZDB '%s'", bin);
}
return;
}
VERIFY3P(realpath(getexecname(), bin), !=, NULL);
if (strstr(bin, ".libs/ztest")) {
strstr(bin, ".libs/ztest")[0] = '\0'; /* In-tree */
strcat(bin, "zdb");
if (ztest_check_path(bin))
return;
}
strcpy(bin, "zdb");
}
static vdev_t *
ztest_random_concrete_vdev_leaf(vdev_t *vd)
{
if (vd == NULL)
return (NULL);
if (vd->vdev_children == 0)
return (vd);
vdev_t *eligible[vd->vdev_children];
int eligible_idx = 0, i;
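/*
 * Gather the children worth descending into: skip vdevs that are
 * being removed, but keep interior vdevs and concrete, attached
 * leaves.
 */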
for (i = 0; i < vd->vdev_children; i++) {
vdev_t *cvd = vd->vdev_child[i];
if (cvd->vdev_top->vdev_removing)
continue;
if (cvd->vdev_children > 0 ||
(vdev_is_concrete(cvd) && !cvd->vdev_detached)) {
eligible[eligible_idx++] = cvd;
}
}
VERIFY3S(eligible_idx, >, 0);
uint64_t child_no = ztest_random(eligible_idx);
return (ztest_random_concrete_vdev_leaf(eligible[child_no]));
}
void
ztest_initialize(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
int error = 0;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* Random leaf vdev */
vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
if (rand_vd == NULL) {
spa_config_exit(spa, SCL_VDEV, FTAG);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The random vdev we've selected may change as soon as we
* drop the spa_config_lock. We create local copies of things
* we're interested in.
*/
uint64_t guid = rand_vd->vdev_guid;
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_initialize_thread != NULL;
zfs_dbgmsg("vd %px, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS);
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *vdev_errlist = fnvlist_alloc();
fnvlist_add_uint64(vdev_guids, path, guid);
error = spa_vdev_initialize(spa, vdev_guids, cmd, vdev_errlist);
fnvlist_free(vdev_guids);
fnvlist_free(vdev_errlist);
switch (cmd) {
case POOL_INITIALIZE_CANCEL:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Cancel initialize %s", path);
if (!active)
(void) printf(" failed (no initialize active)");
(void) printf("\n");
}
break;
case POOL_INITIALIZE_START:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Start initialize %s", path);
if (active && error == 0)
(void) printf(" failed (already active)");
else if (error != 0)
(void) printf(" failed (error %d)", error);
(void) printf("\n");
}
break;
case POOL_INITIALIZE_SUSPEND:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Suspend initialize %s", path);
if (!active)
(void) printf(" failed (no initialize active)");
(void) printf("\n");
}
break;
}
free(path);
mutex_exit(&ztest_vdev_lock);
}
void
ztest_trim(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
int error = 0;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* Random leaf vdev */
vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
if (rand_vd == NULL) {
spa_config_exit(spa, SCL_VDEV, FTAG);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The random vdev we've selected may change as soon as we
* drop the spa_config_lock. We create local copies of things
* we're interested in.
*/
uint64_t guid = rand_vd->vdev_guid;
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_trim_thread != NULL;
zfs_dbgmsg("vd %p, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_TRIM_FUNCS);
uint64_t rate = 1 << ztest_random(30);
boolean_t partial = (ztest_random(5) > 0);
boolean_t secure = (ztest_random(5) > 0);
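/*
 * Randomize the TRIM parameters: the rate is a random power of
 * two (1 .. 2^29), and partial/secure are each enabled roughly
 * 80% of the time (ztest_random(5) > 0).
 */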
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *vdev_errlist = fnvlist_alloc();
fnvlist_add_uint64(vdev_guids, path, guid);
error = spa_vdev_trim(spa, vdev_guids, cmd, rate, partial,
secure, vdev_errlist);
fnvlist_free(vdev_guids);
fnvlist_free(vdev_errlist);
switch (cmd) {
case POOL_TRIM_CANCEL:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Cancel TRIM %s", path);
if (!active)
(void) printf(" failed (no TRIM active)");
(void) printf("\n");
}
break;
case POOL_TRIM_START:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Start TRIM %s", path);
if (active && error == 0)
(void) printf(" failed (already active)");
else if (error != 0)
(void) printf(" failed (error %d)", error);
(void) printf("\n");
}
break;
case POOL_TRIM_SUSPEND:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Suspend TRIM %s", path);
if (!active)
(void) printf(" failed (no TRIM active)");
(void) printf("\n");
}
break;
}
free(path);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify pool integrity by running zdb.
*/
static void
ztest_run_zdb(const char *pool)
{
int status;
char *bin;
char *zdb;
char *zbuf;
const int len = MAXPATHLEN + MAXNAMELEN + 20;
FILE *fp;
bin = umem_alloc(len, UMEM_NOFAIL);
zdb = umem_alloc(len, UMEM_NOFAIL);
zbuf = umem_alloc(1024, UMEM_NOFAIL);
ztest_get_zdb_bin(bin, len);
char **set_gvars_args = ztest_global_vars_to_zdb_args();
if (set_gvars_args == NULL) {
fatal(B_FALSE, "Failed to allocate memory in "
"ztest_global_vars_to_zdb_args(). Cannot run zdb.\n");
}
char *set_gvars_args_joined = join_strings(set_gvars_args, " ");
free(set_gvars_args);
size_t would = snprintf(zdb, len,
"%s -bcc%s%s -G -d -Y -e -y %s -p %s %s",
bin,
ztest_opts.zo_verbose >= 3 ? "s" : "",
ztest_opts.zo_verbose >= 4 ? "v" : "",
set_gvars_args_joined,
ztest_opts.zo_dir,
pool);
ASSERT3U(would, <, len);
umem_free(set_gvars_args_joined, strlen(set_gvars_args_joined) + 1);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Executing %s\n", zdb);
fp = popen(zdb, "r");
while (fgets(zbuf, 1024, fp) != NULL)
if (ztest_opts.zo_verbose >= 3)
(void) printf("%s", zbuf);
status = pclose(fp);
if (status == 0)
goto out;
ztest_dump_core = 0;
if (WIFEXITED(status))
fatal(B_FALSE, "'%s' exit code %d", zdb, WEXITSTATUS(status));
else
fatal(B_FALSE, "'%s' died with signal %d",
zdb, WTERMSIG(status));
out:
umem_free(bin, len);
umem_free(zdb, len);
umem_free(zbuf, 1024);
}
static void
ztest_walk_pool_directory(const char *header)
{
spa_t *spa = NULL;
if (ztest_opts.zo_verbose >= 6)
(void) puts(header);
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
if (ztest_opts.zo_verbose >= 6)
(void) printf("\t%s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
}
static void
ztest_spa_import_export(char *oldname, char *newname)
{
nvlist_t *config, *newconfig;
uint64_t pool_guid;
spa_t *spa;
int error;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("import/export: old = %s, new = %s\n",
oldname, newname);
}
/*
* Clean up from previous runs.
*/
(void) spa_destroy(newname);
/*
* Get the pool's configuration and guid.
*/
VERIFY0(spa_open(oldname, &spa, FTAG));
/*
* Kick off a scrub to tickle scrub/export races.
*/
if (ztest_random(2) == 0)
(void) spa_scan(spa, POOL_SCAN_SCRUB);
pool_guid = spa_guid(spa);
spa_close(spa, FTAG);
ztest_walk_pool_directory("pools before export");
/*
* Export it.
*/
VERIFY0(spa_export(oldname, &config, B_FALSE, B_FALSE));
ztest_walk_pool_directory("pools after export");
/*
* Try to import it.
*/
newconfig = spa_tryimport(config);
ASSERT3P(newconfig, !=, NULL);
fnvlist_free(newconfig);
/*
* Import it under the new name.
*/
error = spa_import(newname, config, NULL, 0);
if (error != 0) {
dump_nvlist(config, 0);
fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
oldname, newname, error);
}
ztest_walk_pool_directory("pools after import");
/*
* Try to import it again -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
/*
* Try to import it under a different name -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
/*
* Verify that the pool is no longer visible under the old name.
*/
VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
/*
* Verify that we can open and close the pool using the new name.
*/
VERIFY0(spa_open(newname, &spa, FTAG));
ASSERT3U(pool_guid, ==, spa_guid(spa));
spa_close(spa, FTAG);
fnvlist_free(config);
}
static void
ztest_resume(spa_t *spa)
{
if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
(void) printf("resuming from suspended state\n");
spa_vdev_state_enter(spa, SCL_NONE);
vdev_clear(spa, NULL);
(void) spa_vdev_state_exit(spa, NULL, 0);
(void) zio_resume(spa);
}
static __attribute__((noreturn)) void
ztest_resume_thread(void *arg)
{
spa_t *spa = arg;
while (!ztest_exiting) {
if (spa_suspended(spa))
ztest_resume(spa);
(void) poll(NULL, 0, 100);
/*
* Periodically change the zfs_compressed_arc_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_compressed_arc_enabled = ztest_random(2);
/*
* Periodically change the zfs_abd_scatter_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_abd_scatter_enabled = ztest_random(2);
}
thread_exit();
}
static __attribute__((noreturn)) void
ztest_deadman_thread(void *arg)
{
ztest_shared_t *zs = arg;
spa_t *spa = ztest_spa;
hrtime_t delay, overdue, last_run = gethrtime();
delay = (zs->zs_thread_stop - zs->zs_thread_start) +
MSEC2NSEC(zfs_deadman_synctime_ms);
while (!ztest_exiting) {
/*
* Wait for the delay timer while checking occasionally
* if we should stop.
*/
if (gethrtime() < last_run + delay) {
(void) poll(NULL, 0, 1000);
continue;
}
/*
* If the pool is suspended then fail immediately. Otherwise,
* check to see if the pool is making any progress. If
* vdev_deadman() discovers that there hasn't been any recent
* I/Os then it will end up aborting the tests.
*/
if (spa_suspended(spa) || spa->spa_root_vdev == NULL) {
fatal(B_FALSE,
"aborting test after %llu seconds because "
"pool has transitioned to a suspended state.",
(u_longlong_t)zfs_deadman_synctime_ms / 1000);
}
vdev_deadman(spa->spa_root_vdev, FTAG);
/*
* If the process doesn't complete within a grace period of
* zfs_deadman_synctime_ms over the expected finish time,
* then it is assumed to be hung and is terminated.
*/
overdue = zs->zs_proc_stop + MSEC2NSEC(zfs_deadman_synctime_ms);
if (gethrtime() > overdue) {
fatal(B_FALSE,
"aborting test after %llu seconds because "
"the process is overdue for termination.",
(gethrtime() - zs->zs_proc_start) / NANOSEC);
}
(void) printf("ztest has been running for %lld seconds\n",
(gethrtime() - zs->zs_proc_start) / NANOSEC);
last_run = gethrtime();
delay = MSEC2NSEC(zfs_deadman_checktime_ms);
}
thread_exit();
}
static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
hrtime_t functime = gethrtime();
int i;
for (i = 0; i < zi->zi_iters; i++)
zi->zi_func(zd, id);
functime = gethrtime() - functime;
atomic_add_64(&zc->zc_count, 1);
atomic_add_64(&zc->zc_time, functime);
if (ztest_opts.zo_verbose >= 4)
(void) printf("%6.2f sec in %s\n",
(double)functime / NANOSEC, zi->zi_funcname);
}
static __attribute__((noreturn)) void
ztest_thread(void *arg)
{
int rand;
uint64_t id = (uintptr_t)arg;
ztest_shared_t *zs = ztest_shared;
uint64_t call_next;
hrtime_t now;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
while ((now = gethrtime()) < zs->zs_thread_stop) {
/*
* See if it's time to force a crash.
*/
if (now > zs->zs_thread_kill)
ztest_kill(zs);
/*
* If we're getting ENOSPC with some regularity, stop.
*/
if (zs->zs_enospc_count > 10)
break;
/*
* Pick a random function to execute.
*/
rand = ztest_random(ZTEST_FUNCS);
zi = &ztest_info[rand];
zc = ZTEST_GET_SHARED_CALLSTATE(rand);
call_next = zc->zc_next;
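/*
 * Claim the function's next-call slot with a CAS so only one
 * thread runs it, then reschedule it at a random point within
 * twice its nominal interval.
 */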
if (now >= call_next &&
atomic_cas_64(&zc->zc_next, call_next, call_next +
ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
ztest_execute(rand, zi, id);
}
}
thread_exit();
}
static void
ztest_dataset_name(char *dsname, const char *pool, int d)
{
(void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d);
}
static void
ztest_dataset_destroy(int d)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
int t;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
if (ztest_opts.zo_verbose >= 3)
(void) printf("Destroying %s to free up space\n", name);
/*
* Clean up any non-standard clones and snapshots. In general,
* ztest thread t operates on dataset (t % zopt_datasets),
* so there may be more than one thing to clean up.
*/
for (t = d; t < ztest_opts.zo_threads;
t += ztest_opts.zo_datasets)
ztest_dsl_dataset_cleanup(name, t);
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}
static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
uint64_t usedobjs, dirobjs, scratch;
/*
* ZTEST_DIROBJ is the object directory for the entire dataset.
* Therefore, the number of objects in use should equal the
* number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
* If not, we have an object leak.
*
* Note that we can only check this in ztest_dataset_open(),
* when the open-context and syncing-context values agree.
* That's because zap_count() returns the open-context value,
* while dmu_objset_space() returns the rootbp fill count.
*/
VERIFY0(zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
ASSERT3U(dirobjs + 1, ==, usedobjs);
}
static int
ztest_dataset_open(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
objset_t *os;
zilog_t *zilog;
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
(void) pthread_rwlock_unlock(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, zd, &os));
(void) pthread_rwlock_unlock(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
if (zilog->zl_header->zh_claim_lr_seq != 0 &&
zilog->zl_header->zh_claim_lr_seq < committed_seq)
fatal(B_FALSE, "missing log records: "
"claimed %"PRIu64" < committed %"PRIu64"",
zilog->zl_header->zh_claim_lr_seq, committed_seq);
ztest_dataset_dirobj_verify(zd);
zil_replay(os, zd, ztest_replay_vector);
ztest_dataset_dirobj_verify(zd);
if (ztest_opts.zo_verbose >= 6)
(void) printf("%s replay %"PRIu64" blocks, "
"%"PRIu64" records, seq %"PRIu64"\n",
zd->zd_name,
zilog->zl_parse_blk_count,
zilog->zl_parse_lr_count,
zilog->zl_replaying_seq);
zilog = zil_open(os, ztest_get_data, NULL);
if (zilog->zl_replaying_seq != 0 &&
zilog->zl_replaying_seq < committed_seq)
fatal(B_FALSE, "missing log records: "
"replayed %"PRIu64" < committed %"PRIu64"",
zilog->zl_replaying_seq, committed_seq);
return (0);
}
static void
ztest_dataset_close(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
zil_close(zd->zd_zilog);
dmu_objset_disown(zd->zd_os, B_TRUE, zd);
ztest_zd_fini(zd);
}
static int
ztest_replay_zil_cb(const char *name, void *arg)
{
(void) arg;
objset_t *os;
ztest_ds_t *zdtmp;
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_ANY, B_TRUE,
B_TRUE, FTAG, &os));
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
if (dmu_objset_zil(os)->zl_parse_lr_count != 0 &&
ztest_opts.zo_verbose >= 6) {
zilog_t *zilog = dmu_objset_zil(os);
(void) printf("%s replay %"PRIu64" blocks, "
"%"PRIu64" records, seq %"PRIu64"\n",
name,
zilog->zl_parse_blk_count,
zilog->zl_parse_lr_count,
zilog->zl_replaying_seq);
}
umem_free(zdtmp, sizeof (ztest_ds_t));
dmu_objset_disown(os, B_TRUE, FTAG);
return (0);
}
static void
ztest_freeze(void)
{
ztest_ds_t *zd = &ztest_ds[0];
spa_t *spa;
int numloops = 0;
if (ztest_opts.zo_verbose >= 3)
(void) printf("testing spa_freeze()...\n");
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
VERIFY0(ztest_dataset_open(0));
ztest_spa = spa;
/*
* Force the first log block to be transactionally allocated.
* We have to do this before we freeze the pool -- otherwise
* the log chain won't be anchored.
*/
while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
ztest_dmu_object_alloc_free(zd, 0);
zil_commit(zd->zd_zilog, 0);
}
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Freeze the pool. This stops spa_sync() from doing anything,
* so that the only way to record changes from now on is the ZIL.
*/
spa_freeze(spa);
/*
* Because it is hard to predict beforehand how much space a write
* will actually require, we cap usage at half the pool's space so
* there is fudge room left if a write runs over that capacity.
*/
uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2;
/*
* Run tests that generate log records but don't alter the pool config
* or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
* We do a txg_wait_synced() after each iteration to force the txg
* to increase well beyond the last synced value in the uberblock.
* The ZIL should be OK with that.
*
* Run a random number of times less than zo_maxloops and ensure we do
* not run out of space on the pool.
*/
while (ztest_random(10) != 0 &&
numloops++ < ztest_opts.zo_maxloops &&
metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) {
ztest_od_t od;
ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE));
ztest_io(zd, od.od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
txg_wait_synced(spa_get_dsl(spa), 0);
}
/*
* Commit all of the changes we just generated.
*/
zil_commit(zd->zd_zilog, 0);
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Close our dataset and close the pool.
*/
ztest_dataset_close(0);
spa_close(spa, FTAG);
kernel_fini();
/*
* Open and close the pool and dataset to induce log replay.
*/
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
ASSERT3U(spa_freeze_txg(spa), ==, UINT64_MAX);
VERIFY0(ztest_dataset_open(0));
ztest_spa = spa;
txg_wait_synced(spa_get_dsl(spa), 0);
ztest_dataset_close(0);
ztest_reguid(NULL, 0);
spa_close(spa, FTAG);
kernel_fini();
}
static void
ztest_import_impl(void)
{
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
int nsearch = 1;
char *searchdirs[nsearch];
int flags = ZFS_IMPORT_MISSING_LOG;
searchdirs[0] = ztest_opts.zo_dir;
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_FALSE;
libpc_handle_t lpch = {
.lpc_lib_handle = NULL,
.lpc_ops = &libzpool_config_ops,
.lpc_printerr = B_TRUE
};
VERIFY0(zpool_find_config(&lpch, ztest_opts.zo_pool, &cfg, &args));
VERIFY0(spa_import(ztest_opts.zo_pool, cfg, NULL, flags));
fnvlist_free(cfg);
}
/*
* Import a storage pool with the given name.
*/
static void
ztest_import(ztest_shared_t *zs)
{
spa_t *spa;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
ztest_import_impl();
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
/*
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
spa_t *spa;
objset_t *os;
kthread_t *resume_thread, *deadman_thread;
kthread_t **run_threads;
uint64_t object;
int error;
int t, d;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
zs->zs_thread_kill = zs->zs_thread_stop;
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
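/*
 * With probability zo_killrate (in percent), pull the kill time
 * forward so this pass dies partway through, exercising crash
 * recovery on the next pass.
 */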
mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
* Open our pool. It may need to be imported first depending on
* what tests were running when the previous pass was terminated.
*/
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
error = spa_open(ztest_opts.zo_pool, &spa, FTAG);
if (error) {
VERIFY3S(error, ==, ENOENT);
ztest_import_impl();
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
}
metaslab_preload_limit = ztest_random(20) + 1;
ztest_spa = spa;
VERIFY0(vdev_raidz_impl_set("cycle"));
dmu_objset_stats_t dds;
VERIFY0(ztest_dmu_objset_own(ztest_opts.zo_pool,
DMU_OST_ANY, B_TRUE, B_TRUE, FTAG, &os));
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
zs->zs_guid = dds.dds_guid;
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Create a thread to periodically resume suspended I/O.
*/
resume_thread = thread_create(NULL, 0, ztest_resume_thread,
spa, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
/*
* Create a deadman thread and set to panic if we hang.
*/
deadman_thread = thread_create(NULL, 0, ztest_deadman_thread,
zs, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
* This hits all edge cases, including zero and the max.
*/
for (t = 0; t < 64; t++) {
for (d = -5; d <= 5; d++) {
error = dmu_object_info(spa->spa_meta_objset,
(1ULL << t) + d, NULL);
ASSERT(error == 0 || error == ENOENT ||
error == EINVAL);
}
}
/*
* If we got any ENOSPC errors on the previous run, destroy something.
*/
if (zs->zs_enospc_count != 0) {
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
/*
* If we were in the middle of ztest_device_removal() and were killed
* we need to ensure the removal and scrub complete before running
* any tests that check ztest_device_removal_active. The removal will
* be restarted automatically when the spa is opened, but we need to
* initiate the scrub manually if it is not already in progress. Note
* that we always run the scrub whenever an indirect vdev exists
* because we have no way of knowing for sure if ztest_device_removal()
* fully completed its scrub before the pool was reimported.
*/
if (spa->spa_removing_phys.sr_state == DSS_SCANNING ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
error = ztest_scrub_impl(spa);
if (error == EBUSY)
error = 0;
ASSERT0(error);
}
run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
/*
* Replay all logs of all datasets in the pool. This is primarily for
* temporary datasets which wouldn't otherwise get replayed, which
* can trigger failures when attempting to offline a SLOG in
* ztest_fault_inject().
*/
(void) dmu_objset_find(ztest_opts.zo_pool, ztest_replay_zil_cb,
NULL, DS_FIND_CHILDREN);
/*
* Kick off all the tests that run in parallel.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) {
umem_free(run_threads, ztest_opts.zo_threads *
sizeof (kthread_t *));
return;
}
run_threads[t] = thread_create(NULL, 0, ztest_thread,
(void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE,
defclsyspri);
}
/*
* Wait for all of the tests to complete.
*/
for (t = 0; t < ztest_opts.zo_threads; t++)
VERIFY0(thread_join(run_threads[t]));
/*
* Close all datasets. This must be done after all the threads
* are joined so we can be sure none of the datasets are in-use
* by any of the threads.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
umem_free(run_threads, ztest_opts.zo_threads * sizeof (kthread_t *));
/* Kill the resume and deadman threads */
ztest_exiting = B_TRUE;
VERIFY0(thread_join(resume_thread));
VERIFY0(thread_join(deadman_thread));
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
for (object = 1; object < 50; object++) {
dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20,
ZIO_PRIORITY_SYNC_READ);
}
/* Verify that at least one commit cb was called in a timely fashion */
if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
VERIFY0(zc_min_txg_delay);
spa_close(spa, FTAG);
/*
* Verify that we can loop over all pools.
*/
mutex_enter(&spa_namespace_lock);
for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
if (ztest_opts.zo_verbose > 3)
(void) printf("spa_next: found %s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
/*
* Verify that we can export the pool and reimport it under a
* different name.
*/
if ((ztest_random(2) == 0) && !ztest_opts.zo_mmp_test) {
char name[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s_import",
ztest_opts.zo_pool);
ztest_spa_import_export(ztest_opts.zo_pool, name);
ztest_spa_import_export(name, ztest_opts.zo_pool);
}
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
mutex_destroy(&zcl.zcl_callbacks_lock);
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
static void
print_time(hrtime_t t, char *timebuf)
{
hrtime_t s = t / NANOSEC;
hrtime_t m = s / 60;
hrtime_t h = m / 60;
hrtime_t d = h / 24;
s -= m * 60;
m -= h * 60;
h -= d * 24;
timebuf[0] = '\0';
if (d)
(void) sprintf(timebuf,
"%llud%02lluh%02llum%02llus", d, h, m, s);
else if (h)
(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
else if (m)
(void) sprintf(timebuf, "%llum%02llus", m, s);
else
(void) sprintf(timebuf, "%llus", s);
}
static nvlist_t *
make_random_props(void)
{
nvlist_t *props;
props = fnvlist_alloc();
if (ztest_random(2) == 0)
return (props);
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1);
return (props);
}
/*
* Create a storage pool with the given name and initial vdev size.
* Then test spa_freeze() functionality.
*/
static void
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
int i;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
/*
* Create the storage pool.
*/
(void) spa_destroy(ztest_opts.zo_pool);
ztest_shared->zs_vdev_next_leaf = 0;
zs->zs_splits = 0;
zs->zs_mirrors = ztest_opts.zo_mirrors;
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
props = make_random_props();
/*
* We don't expect the pool to suspend unless maxfaults == 0,
* in which case ztest_fault_inject() temporarily takes away
* the only valid replica.
*/
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
MAXFAULTS(zs) ? ZIO_FAILURE_MODE_PANIC : ZIO_FAILURE_MODE_WAIT);
for (i = 0; i < SPA_FEATURES; i++) {
char *buf;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
/*
* 75% chance of using the log space map feature. We want ztest
* to exercise both the code paths that use the log space map
* feature and the ones that don't.
*/
if (i == SPA_FEATURE_LOG_SPACEMAP && ztest_random(4) == 0)
continue;
VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
spa_feature_table[i].fi_uname));
fnvlist_add_uint64(props, buf, 0);
free(buf);
}
VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL, NULL));
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
static void
setup_data_fd(void)
{
static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
ztest_fd_data = mkstemp(ztest_name_data);
ASSERT3S(ztest_fd_data, >=, 0);
(void) unlink(ztest_name_data);
}
static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
int size;
size = hdr->zh_hdr_size;
size += hdr->zh_opts_size;
size += hdr->zh_size;
size += hdr->zh_stats_size * hdr->zh_stats_count;
size += hdr->zh_ds_size * hdr->zh_ds_count;
return (size);
}
static void
setup_hdr(void)
{
int size;
ztest_shared_hdr_t *hdr;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
VERIFY0(ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
hdr->zh_size = sizeof (ztest_shared_t);
hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
hdr->zh_stats_count = ZTEST_FUNCS;
hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
hdr->zh_ds_count = ztest_opts.zo_datasets;
size = shared_data_size(hdr);
VERIFY0(ftruncate(ztest_fd_data, size));
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}
static void
setup_data(void)
{
int size, offset;
ztest_shared_hdr_t *hdr;
uint8_t *buf;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
size = shared_data_size(hdr);
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
buf = (uint8_t *)hdr;
offset = hdr->zh_hdr_size;
ztest_shared_opts = (void *)&buf[offset];
offset += hdr->zh_opts_size;
ztest_shared = (void *)&buf[offset];
offset += hdr->zh_size;
ztest_shared_callstate = (void *)&buf[offset];
offset += hdr->zh_stats_size * hdr->zh_stats_count;
ztest_shared_ds = (void *)&buf[offset];
}
static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
pid_t pid;
int status;
char *cmdbuf = NULL;
pid = fork();
if (cmd == NULL) {
cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
cmd = cmdbuf;
}
if (pid == -1)
fatal(B_TRUE, "fork failed");
if (pid == 0) { /* child */
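/*
 * Hand the shared-data fd to the child through the environment;
 * the re-exec'd ztest picks it up from ZTEST_FD_DATA in main().
 */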
char fd_data_str[12];
VERIFY3S(11, >=,
snprintf(fd_data_str, 12, "%d", ztest_fd_data));
VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
if (libpath != NULL) {
const char *curlp = getenv("LD_LIBRARY_PATH");
if (curlp == NULL)
VERIFY0(setenv("LD_LIBRARY_PATH", libpath, 1));
else {
char *newlp = NULL;
VERIFY3S(-1, !=,
asprintf(&newlp, "%s:%s", libpath, curlp));
VERIFY0(setenv("LD_LIBRARY_PATH", newlp, 1));
free(newlp);
}
}
(void) execl(cmd, cmd, (char *)NULL);
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "exec failed: %s", cmd);
}
if (cmdbuf != NULL) {
umem_free(cmdbuf, MAXPATHLEN);
cmd = NULL;
}
while (waitpid(pid, &status, 0) != pid)
continue;
if (statusp != NULL)
*statusp = status;
if (WIFEXITED(status)) {
if (WEXITSTATUS(status) != 0) {
(void) fprintf(stderr, "child exited with code %d\n",
WEXITSTATUS(status));
exit(2);
}
return (B_FALSE);
} else if (WIFSIGNALED(status)) {
if (!ignorekill || WTERMSIG(status) != SIGKILL) {
(void) fprintf(stderr, "child died with signal %d\n",
WTERMSIG(status));
exit(3);
}
return (B_TRUE);
} else {
(void) fprintf(stderr, "something strange happened to child\n");
exit(4);
}
}
static void
ztest_run_init(void)
{
int i;
ztest_shared_t *zs = ztest_shared;
/*
* Blow away any existing copy of zpool.cache
*/
(void) remove(spa_config_path);
if (ztest_opts.zo_init == 0) {
if (ztest_opts.zo_verbose >= 1)
(void) printf("Importing pool %s\n",
ztest_opts.zo_pool);
ztest_import(zs);
return;
}
/*
* Create and initialize our storage pool.
*/
for (i = 1; i <= ztest_opts.zo_init; i++) {
memset(zs, 0, sizeof (*zs));
if (ztest_opts.zo_verbose >= 3 &&
ztest_opts.zo_init != 1) {
(void) printf("ztest_init(), pass %d\n", i);
}
ztest_init(zs);
}
}
int
main(int argc, char **argv)
{
int kills = 0;
int iters = 0;
int older = 0;
int newer = 0;
ztest_shared_t *zs;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
char timebuf[100];
char numbuf[NN_NUMBUF_SZ];
char *cmd;
boolean_t hasalt;
int f, err;
char *fd_data_str = getenv("ZTEST_FD_DATA");
struct sigaction action;
(void) setvbuf(stdout, NULL, _IOLBF, 0);
dprintf_setup(&argc, argv);
zfs_deadman_synctime_ms = 300000;
zfs_deadman_checktime_ms = 30000;
/*
* As two-word space map entries may not come up often (especially
* if pool and vdev sizes are small), we want to force at least some
* of them so the feature gets tested.
*/
zfs_force_some_double_word_sm_entries = B_TRUE;
/*
* Verify that even extensively damaged split blocks with many
* segments can be reconstructed in a reasonable amount of time
* when reconstruction is known to be possible.
*
* Note: the lower this value is, the more damage we inflict, and
* the more time ztest spends in recovering that damage. We chose
* to induce damage 1/100th of the time so recovery is tested but
* not so frequently that ztest doesn't get to test other code paths.
*/
zfs_reconstruct_indirect_damage_fraction = 100;
action.sa_handler = sig_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
if (sigaction(SIGSEGV, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGSEGV: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
if (sigaction(SIGABRT, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGABRT: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
/*
* Force random_get_bytes() to use /dev/urandom in order to prevent
* ztest from needlessly depleting the system entropy pool.
*/
random_path = "/dev/urandom";
ztest_fd_rand = open(random_path, O_RDONLY | O_CLOEXEC);
ASSERT3S(ztest_fd_rand, >=, 0);
if (!fd_data_str) {
process_options(argc, argv);
setup_data_fd();
setup_hdr();
setup_data();
memcpy(ztest_shared_opts, &ztest_opts,
sizeof (*ztest_shared_opts));
} else {
ztest_fd_data = atoi(fd_data_str);
setup_data();
memcpy(&ztest_opts, ztest_shared_opts, sizeof (ztest_opts));
}
ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
err = ztest_set_global_vars();
if (err != 0 && !fd_data_str) {
/* error message done by ztest_set_global_vars */
exit(EXIT_FAILURE);
} else {
/* children should not be spawned if setting gvars fails */
VERIFY3S(err, ==, 0);
}
/* Override location of zpool.cache */
VERIFY3S(asprintf((char **)&spa_config_path, "%s/zpool.cache",
ztest_opts.zo_dir), !=, -1);
ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
UMEM_NOFAIL);
zs = ztest_shared;
if (fd_data_str) {
metaslab_force_ganging = ztest_opts.zo_metaslab_force_ganging;
metaslab_df_alloc_threshold =
zs->zs_metaslab_df_alloc_threshold;
if (zs->zs_do_init)
ztest_run_init();
else
ztest_run(zs);
exit(0);
}
hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
if (ztest_opts.zo_verbose >= 1) {
(void) printf("%"PRIu64" vdevs, %d datasets, %d threads, "
"%d %s disks, %"PRIu64" seconds...\n\n",
ztest_opts.zo_vdevs,
ztest_opts.zo_datasets,
ztest_opts.zo_threads,
ztest_opts.zo_raid_children,
ztest_opts.zo_raid_type,
ztest_opts.zo_time);
}
cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
(void) strlcpy(cmd, getexecname(), MAXNAMELEN);
zs->zs_do_init = B_TRUE;
if (strlen(ztest_opts.zo_alt_ztest) != 0) {
if (ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest for "
"initialization: %s\n", ztest_opts.zo_alt_ztest);
}
VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_FALSE, NULL));
} else {
VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
}
zs->zs_do_init = B_FALSE;
zs->zs_proc_start = gethrtime();
zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
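/*
 * Seed each function's first call time; functions whose interval
 * cannot fit within the total run time are disabled by pushing
 * zc_next past the horizon (UINT64_MAX).
 */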
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
zc->zc_next = UINT64_MAX;
else
zc->zc_next = zs->zs_proc_start +
ztest_random(2 * zi->zi_interval[0] + 1);
}
/*
* Run the tests in a loop. These tests include fault injection
* to verify that self-healing data works, and forced crashes
* to verify that we never lose on-disk consistency.
*/
while (gethrtime() < zs->zs_proc_stop) {
int status;
boolean_t killed;
/*
* Initialize the workload counters for each function.
*/
for (f = 0; f < ZTEST_FUNCS; f++) {
zc = ZTEST_GET_SHARED_CALLSTATE(f);
zc->zc_count = 0;
zc->zc_time = 0;
}
/* Set the allocation switch size */
zs->zs_metaslab_df_alloc_threshold =
ztest_random(zs->zs_metaslab_sz / 4) + 1;
if (!hasalt || ztest_random(2) == 0) {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing newer ztest: %s\n",
cmd);
}
newer++;
killed = exec_child(cmd, NULL, B_TRUE, &status);
} else {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest: %s\n",
ztest_opts.zo_alt_ztest);
}
older++;
killed = exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_TRUE, &status);
}
if (killed)
kills++;
iters++;
if (ztest_opts.zo_verbose >= 1) {
hrtime_t now = gethrtime();
now = MIN(now, zs->zs_proc_stop);
print_time(zs->zs_proc_stop - now, timebuf);
nicenum(zs->zs_space, numbuf, sizeof (numbuf));
(void) printf("Pass %3d, %8s, %3"PRIu64" ENOSPC, "
"%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
iters,
WIFEXITED(status) ? "Complete" : "SIGKILL",
zs->zs_enospc_count,
100.0 * zs->zs_alloc / zs->zs_space,
numbuf,
100.0 * (now - zs->zs_proc_start) /
(ztest_opts.zo_time * NANOSEC), timebuf);
}
if (ztest_opts.zo_verbose >= 2) {
(void) printf("\nWorkload summary:\n\n");
(void) printf("%7s %9s %s\n",
"Calls", "Time", "Function");
(void) printf("%7s %9s %s\n",
"-----", "----", "--------");
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
print_time(zc->zc_time, timebuf);
(void) printf("%7"PRIu64" %9s %s\n",
zc->zc_count, timebuf,
zi->zi_funcname);
}
(void) printf("\n");
}
if (!ztest_opts.zo_mmp_test)
ztest_run_zdb(ztest_opts.zo_pool);
}
if (ztest_opts.zo_verbose >= 1) {
if (hasalt) {
(void) printf("%d runs of older ztest: %s\n", older,
ztest_opts.zo_alt_ztest);
(void) printf("%d runs of newer ztest: %s\n", newer,
cmd);
}
(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
kills, iters - kills, (100.0 * kills) / MAX(1, iters));
}
umem_free(cmd, MAXNAMELEN);
return (0);
}
diff --git a/sys/contrib/openzfs/config/Shellcheck.am b/sys/contrib/openzfs/config/Shellcheck.am
index 1cff81e04be8..1ab13516066c 100644
--- a/sys/contrib/openzfs/config/Shellcheck.am
+++ b/sys/contrib/openzfs/config/Shellcheck.am
@@ -1,41 +1,42 @@
# Global ShellCheck exclusions:
#
# ShellCheck can't follow non-constant source. Use a directive to specify location. [SC1090]
# Not following: a was not specified as input (see shellcheck -x). [SC1091]
# Prefer putting braces around variable references even when not strictly required. [SC2250]
# Consider invoking this command separately to avoid masking its return value (or use '|| true' to ignore). [SC2312]
+# Command appears to be unreachable. Check usage (or ignore if invoked indirectly). [SC2317]
# In POSIX sh, 'local' is undefined. [SC2039] # older ShellCheck versions
# In POSIX sh, 'local' is undefined. [SC3043] # newer ShellCheck versions
SHELLCHECKSCRIPTS =
JUST_SHELLCHECK_OPTS = $(addprefix shellcheck-here-,$(subst /,^,$(1)))
JUST_CHECKBASHISMS_OPTS = $(addprefix checkbashisms-here-,$(subst /,^,$(1)))
SHELLCHECK_OPTS = $(call JUST_SHELLCHECK_OPTS,$(1)) $(call JUST_CHECKBASHISMS_OPTS,$(1))
PHONY += shellcheck
_STGT = $(subst ^,/,$(subst shellcheck-here-,,$@))
shellcheck-here-%:
if HAVE_SHELLCHECK
- shellcheck --format=gcc --enable=all --exclude=SC1090,SC1091,SC2039,SC2250,SC2312,SC3043 $$([ -n "$(SHELLCHECK_SHELL)" ] && echo "--shell=$(SHELLCHECK_SHELL)") "$$([ -e "$(_STGT)" ] || echo "$(srcdir)/")$(_STGT)"
+ shellcheck --format=gcc --enable=all --exclude=SC1090,SC1091,SC2039,SC2250,SC2312,SC2317,SC3043 $$([ -n "$(SHELLCHECK_SHELL)" ] && echo "--shell=$(SHELLCHECK_SHELL)") "$$([ -e "$(_STGT)" ] || echo "$(srcdir)/")$(_STGT)"
else
@echo "skipping shellcheck of" $(_STGT) "because shellcheck is not installed"
endif
shellcheck: $(SHELLCHECKSCRIPTS) $(call JUST_SHELLCHECK_OPTS,$(SHELLCHECKSCRIPTS))
PHONY += checkbashisms
# command -v *is* specified by POSIX and every shell in existence supports it
_BTGT = $(subst ^,/,$(subst checkbashisms-here-,,$@))
checkbashisms-here-%:
if HAVE_CHECKBASHISMS
! { [ -n "$(SHELLCHECK_SHELL)" ] && echo '#!/bin/$(SHELLCHECK_SHELL)'; cat "$$([ -e "$(_BTGT)" ] || echo "$(srcdir)/")$(_BTGT)"; } | \
checkbashisms -npx 2>&1 | grep -vFe "'command' with option other than -p" -e 'command -v' -e 'any possible bashisms' $(CHECKBASHISMS_IGNORE) >&2
else
@echo "skipping checkbashisms of" $(_BTGT) "because checkbashisms is not installed"
endif
checkbashisms: $(SHELLCHECKSCRIPTS) $(call JUST_CHECKBASHISMS_OPTS,$(SHELLCHECKSCRIPTS))
diff --git a/sys/contrib/openzfs/config/kernel-blkdev.m4 b/sys/contrib/openzfs/config/kernel-blkdev.m4
index 28e5364581ea..887acee670ba 100644
--- a/sys/contrib/openzfs/config/kernel-blkdev.m4
+++ b/sys/contrib/openzfs/config/kernel-blkdev.m4
@@ -1,479 +1,533 @@
dnl #
dnl # 2.6.38 API change,
dnl # Added blkdev_get_by_path()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH], [
ZFS_LINUX_TEST_SRC([blkdev_get_by_path], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused)) = NULL;
const char *path = "path";
fmode_t mode = 0;
void *holder = NULL;
bdev = blkdev_get_by_path(path, mode, holder);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_GET_BY_PATH], [
AC_MSG_CHECKING([whether blkdev_get_by_path() exists])
ZFS_LINUX_TEST_RESULT([blkdev_get_by_path], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([blkdev_get_by_path()])
])
])
dnl #
dnl # 2.6.38 API change,
dnl # Added blkdev_put()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PUT], [
ZFS_LINUX_TEST_SRC([blkdev_put], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
fmode_t mode = 0;
blkdev_put(bdev, mode);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_PUT], [
AC_MSG_CHECKING([whether blkdev_put() exists])
ZFS_LINUX_TEST_RESULT([blkdev_put], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([blkdev_put()])
])
])
dnl #
dnl # 4.1 API, exported blkdev_reread_part() symbol, backported to the
dnl # 3.10.0 CentOS 7.x enterprise kernels.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_REREAD_PART], [
ZFS_LINUX_TEST_SRC([blkdev_reread_part], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
int error;
error = blkdev_reread_part(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_REREAD_PART], [
AC_MSG_CHECKING([whether blkdev_reread_part() exists])
ZFS_LINUX_TEST_RESULT([blkdev_reread_part], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_REREAD_PART, 1,
[blkdev_reread_part() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # check_disk_change() was removed in 5.10
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE], [
ZFS_LINUX_TEST_SRC([check_disk_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
bool error;
error = check_disk_change(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE], [
AC_MSG_CHECKING([whether check_disk_change() exists])
ZFS_LINUX_TEST_RESULT([check_disk_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CHECK_DISK_CHANGE, 1,
[check_disk_change() exists])
], [
AC_MSG_RESULT(no)
])
])
+dnl #
+dnl # 6.5.x API change
+dnl # disk_check_media_change() was added
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_DISK_CHECK_MEDIA_CHANGE], [
+ ZFS_LINUX_TEST_SRC([disk_check_media_change], [
+ #include <linux/fs.h>
+ #include <linux/blkdev.h>
+ ], [
+ struct block_device *bdev = NULL;
+ bool error;
+
+ error = disk_check_media_change(bdev->bd_disk);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_DISK_CHECK_MEDIA_CHANGE], [
+ AC_MSG_CHECKING([whether disk_check_media_change() exists])
+ ZFS_LINUX_TEST_RESULT([disk_check_media_change], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DISK_CHECK_MEDIA_CHANGE, 1,
+ [disk_check_media_change() exists])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
+
dnl #
dnl # bdev_kobj() is introduced from 5.12
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_KOBJ], [
ZFS_LINUX_TEST_SRC([bdev_kobj], [
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/kobject.h>
], [
struct block_device *bdev = NULL;
struct kobject *disk_kobj;
disk_kobj = bdev_kobj(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_KOBJ], [
AC_MSG_CHECKING([whether bdev_kobj() exists])
ZFS_LINUX_TEST_RESULT([bdev_kobj], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_KOBJ, 1,
[bdev_kobj() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # part_to_dev() was removed in 5.12
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PART_TO_DEV], [
ZFS_LINUX_TEST_SRC([part_to_dev], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct hd_struct *p = NULL;
struct device *pdev;
pdev = part_to_dev(p);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_PART_TO_DEV], [
AC_MSG_CHECKING([whether part_to_dev() exists])
ZFS_LINUX_TEST_RESULT([part_to_dev], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PART_TO_DEV, 1,
[part_to_dev() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.10 API, check_disk_change() is removed, in favor of
dnl # bdev_check_media_change(), which doesn't force revalidation
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
ZFS_LINUX_TEST_SRC([bdev_check_media_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
int error;
error = bdev_check_media_change(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
AC_MSG_CHECKING([whether bdev_check_media_change() exists])
ZFS_LINUX_TEST_RESULT([bdev_check_media_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_CHECK_MEDIA_CHANGE, 1,
[bdev_check_media_change() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.22 API change
dnl # Single argument invalidate_bdev()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_INVALIDATE_BDEV], [
ZFS_LINUX_TEST_SRC([invalidate_bdev], [
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
invalidate_bdev(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_INVALIDATE_BDEV], [
AC_MSG_CHECKING([whether invalidate_bdev() exists])
ZFS_LINUX_TEST_RESULT([invalidate_bdev], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([invalidate_bdev()])
])
])
dnl #
dnl # 5.11 API, lookup_bdev() takes dev_t argument.
dnl # 2.6.27 API, lookup_bdev() was first exported.
dnl # 4.4.0-6.21 API, lookup_bdev() on Ubuntu takes mode argument.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV], [
ZFS_LINUX_TEST_SRC([lookup_bdev_devt], [
#include <linux/blkdev.h>
], [
int error __attribute__ ((unused));
const char path[] = "/example/path";
dev_t dev;
error = lookup_bdev(path, &dev);
])
ZFS_LINUX_TEST_SRC([lookup_bdev_1arg], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused));
const char path[] = "/example/path";
bdev = lookup_bdev(path);
])
ZFS_LINUX_TEST_SRC([lookup_bdev_mode], [
#include <linux/fs.h>
], [
struct block_device *bdev __attribute__ ((unused));
const char path[] = "/example/path";
bdev = lookup_bdev(path, FMODE_READ);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_LOOKUP_BDEV], [
AC_MSG_CHECKING([whether lookup_bdev() wants dev_t arg])
ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_devt],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_DEVT_LOOKUP_BDEV, 1,
[lookup_bdev() wants dev_t arg])
], [
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether lookup_bdev() wants 1 arg])
ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_1arg],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_1ARG_LOOKUP_BDEV, 1,
[lookup_bdev() wants 1 arg])
], [
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether lookup_bdev() wants mode arg])
ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_mode],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_MODE_LOOKUP_BDEV, 1,
[lookup_bdev() wants mode arg])
], [
ZFS_LINUX_TEST_ERROR([lookup_bdev()])
])
])
])
])
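The three resulting defines are mutually exclusive, so a caller can resolve a
path to a dev_t with a cascade like the following sketch (zfs_lookup_bdev()
is a hypothetical name; error handling abbreviated):

	#include <linux/fs.h>
	#include <linux/blkdev.h>

	static inline int
	zfs_lookup_bdev(const char *path, dev_t *dev)
	{
	#if defined(HAVE_DEVT_LOOKUP_BDEV)
		/* 5.11+: the kernel fills in the dev_t directly. */
		return (lookup_bdev(path, dev));
	#elif defined(HAVE_1ARG_LOOKUP_BDEV)
		/* 2.6.27 - 5.10: returns a referenced block_device. */
		struct block_device *bdev = lookup_bdev(path);
		if (IS_ERR(bdev))
			return (PTR_ERR(bdev));
		*dev = bdev->bd_dev;
		bdput(bdev);
		return (0);
	#elif defined(HAVE_MODE_LOOKUP_BDEV)
		/* Ubuntu 4.4 variant with an extra mode argument. */
		struct block_device *bdev = lookup_bdev(path, FMODE_READ);
		if (IS_ERR(bdev))
			return (PTR_ERR(bdev));
		*dev = bdev->bd_dev;
		bdput(bdev);
		return (0);
	#else
	#error "Unsupported lookup_bdev() interface"
	#endif
	}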
dnl #
dnl # 2.6.30 API change
dnl #
dnl # The bdev_physical_block_size() interface was added to provide a way
dnl # to determine the smallest write which can be performed without a
dnl # read-modify-write operation.
dnl #
dnl # Unfortunately, this interface isn't entirely reliable because
dnl # drives are sometimes known to misreport this value.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE], [
ZFS_LINUX_TEST_SRC([bdev_physical_block_size], [
#include <linux/blkdev.h>
],[
struct block_device *bdev __attribute__ ((unused)) = NULL;
bdev_physical_block_size(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE], [
AC_MSG_CHECKING([whether bdev_physical_block_size() is available])
ZFS_LINUX_TEST_RESULT([bdev_physical_block_size], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([bdev_physical_block_size()])
])
])
dnl #
dnl # 2.6.30 API change
dnl # Added bdev_logical_block_size().
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE], [
ZFS_LINUX_TEST_SRC([bdev_logical_block_size], [
#include <linux/blkdev.h>
],[
struct block_device *bdev __attribute__ ((unused)) = NULL;
bdev_logical_block_size(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE], [
AC_MSG_CHECKING([whether bdev_logical_block_size() is available])
ZFS_LINUX_TEST_RESULT([bdev_logical_block_size], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([bdev_logical_block_size()])
])
])
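Both helpers take a struct block_device and return a size in bytes; an
illustrative (non-normative) use:

	#include <linux/blkdev.h>

	static void
	example_report_block_sizes(struct block_device *bdev)
	{
		/* Smallest addressable unit, e.g. 512. */
		unsigned int lbs = bdev_logical_block_size(bdev);
		/* Smallest write avoiding read-modify-write, e.g. 4096. */
		unsigned int pbs = bdev_physical_block_size(bdev);

		pr_info("logical=%u physical=%u\n", lbs, pbs);
	}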
dnl #
dnl # 5.11 API change
dnl # Added bdev_whole() helper.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE], [
ZFS_LINUX_TEST_SRC([bdev_whole], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
bdev = bdev_whole(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE], [
AC_MSG_CHECKING([whether bdev_whole() is available])
ZFS_LINUX_TEST_RESULT([bdev_whole], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_WHOLE, 1, [bdev_whole() is available])
],[
AC_MSG_RESULT(no)
])
])
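When the helper is missing, the conventional fallback is the pre-5.11
bd_contains field; a compat sketch along the lines OpenZFS uses elsewhere:

	#include <linux/blkdev.h>

	#ifndef HAVE_BDEV_WHOLE
	static inline struct block_device *
	bdev_whole(struct block_device *bdev)
	{
		/* Pre-5.11: the containing whole-disk device. */
		return (bdev->bd_contains);
	}
	#endif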
dnl #
dnl # 5.20 API change:
dnl # bdevname() was removed; snprintf(..., "%pg") should be used instead.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEVNAME], [
ZFS_LINUX_TEST_SRC([bdevname], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused)) = NULL;
char path[BDEVNAME_SIZE];
(void) bdevname(bdev, path);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEVNAME], [
AC_MSG_CHECKING([whether bdevname() exists])
ZFS_LINUX_TEST_RESULT([bdevname], [
AC_DEFINE(HAVE_BDEVNAME, 1, [bdevname() is available])
AC_MSG_RESULT(yes)
], [
AC_MSG_RESULT(no)
])
])
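A possible shim bridging both worlds (zfs_bdevname() is a hypothetical name;
on the old path the buffer must hold at least BDEVNAME_SIZE bytes):

	#include <linux/blkdev.h>

	static inline void
	zfs_bdevname(struct block_device *bdev, char *name, size_t len)
	{
	#ifdef HAVE_BDEVNAME
		/* Pre-5.20: name must be at least BDEVNAME_SIZE bytes. */
		(void) len;
		bdevname(bdev, name);
	#else
		/* 5.20+: the %pg printk specifier formats a bdev name. */
		snprintf(name, len, "%pg", bdev);
	#endif
	}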
dnl #
dnl # 5.19 API: blkdev_issue_secure_erase()
dnl # 3.10 API: blkdev_issue_discard(..., BLKDEV_DISCARD_SECURE)
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_ISSUE_SECURE_ERASE], [
ZFS_LINUX_TEST_SRC([blkdev_issue_secure_erase], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
sector_t sector = 0;
sector_t nr_sects = 0;
int error __attribute__ ((unused));
error = blkdev_issue_secure_erase(bdev,
sector, nr_sects, GFP_KERNEL);
])
ZFS_LINUX_TEST_SRC([blkdev_issue_discard_flags], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
sector_t sector = 0;
sector_t nr_sects = 0;
unsigned long flags = 0;
int error __attribute__ ((unused));
error = blkdev_issue_discard(bdev,
sector, nr_sects, GFP_KERNEL, flags);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_ISSUE_SECURE_ERASE], [
AC_MSG_CHECKING([whether blkdev_issue_secure_erase() is available])
ZFS_LINUX_TEST_RESULT([blkdev_issue_secure_erase], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_ISSUE_SECURE_ERASE, 1,
[blkdev_issue_secure_erase() is available])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether blkdev_issue_discard() is available])
ZFS_LINUX_TEST_RESULT([blkdev_issue_discard_flags], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_ISSUE_DISCARD, 1,
[blkdev_issue_discard() is available])
],[
ZFS_LINUX_TEST_ERROR([blkdev_issue_discard()])
])
])
])
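A caller wanting a secure erase regardless of kernel version might wrap the
two APIs like this (sketch; zfs_secure_erase() is a hypothetical name):

	#include <linux/blkdev.h>

	static inline int
	zfs_secure_erase(struct block_device *bdev, sector_t start, sector_t nr)
	{
	#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
		/* 5.19+: dedicated secure-erase entry point. */
		return (blkdev_issue_secure_erase(bdev, start, nr, GFP_KERNEL));
	#elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
		/* 3.10 - 5.18: secure erase via a discard flag. */
		return (blkdev_issue_discard(bdev, start, nr, GFP_KERNEL,
		    BLKDEV_DISCARD_SECURE));
	#else
	#error "No secure erase interface detected"
	#endif
	}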
dnl #
dnl # 5.13 API change
dnl # blkdev_get_by_path() no longer handles ERESTARTSYS
dnl #
dnl # Unfortunately we're forced to rely solely on the kernel version
dnl # number in order to determine the expected behavior. This was an
dnl # internal change to blkdev_get_by_dev(), see commit a8ed1a0607.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_GET_ERESTARTSYS], [
AC_MSG_CHECKING([whether blkdev_get_by_path() handles ERESTARTSYS])
AS_VERSION_COMPARE([$LINUX_VERSION], [5.13.0], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_GET_ERESTARTSYS, 1,
[blkdev_get_by_path() handles ERESTARTSYS])
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(no)
])
])
+dnl #
+dnl # 6.5.x API change
+dnl # BLK_STS_NEXUS replaced with BLK_STS_RESV_CONFLICT
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BLK_STS_RESV_CONFLICT], [
+ ZFS_LINUX_TEST_SRC([blk_sts_resv_conflict], [
+ #include <linux/blkdev.h>
+ ],[
+ blk_status_t s __attribute__ ((unused)) = BLK_STS_RESV_CONFLICT;
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT], [
+ AC_MSG_CHECKING([whether BLK_STS_RESV_CONFLICT is defined])
+ ZFS_LINUX_TEST_RESULT([blk_sts_resv_conflict], [
+ AC_DEFINE(HAVE_BLK_STS_RESV_CONFLICT, 1, [BLK_STS_RESV_CONFLICT is defined])
+ AC_MSG_RESULT(yes)
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
+
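On kernels predating the rename, a compat header can simply alias the new
name to the old constant (a minimal sketch, not taken from this diff):

	#include <linux/blk_types.h>

	#ifndef HAVE_BLK_STS_RESV_CONFLICT
	/* Pre-6.5: the status code was still called BLK_STS_NEXUS. */
	#define	BLK_STS_RESV_CONFLICT	BLK_STS_NEXUS
	#endif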
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH
ZFS_AC_KERNEL_SRC_BLKDEV_PUT
ZFS_AC_KERNEL_SRC_BLKDEV_REREAD_PART
ZFS_AC_KERNEL_SRC_BLKDEV_INVALIDATE_BDEV
ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE
ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_SRC_BLKDEV_ISSUE_SECURE_ERASE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_KOBJ
ZFS_AC_KERNEL_SRC_BLKDEV_PART_TO_DEV
+ ZFS_AC_KERNEL_SRC_BLKDEV_DISK_CHECK_MEDIA_CHANGE
+ ZFS_AC_KERNEL_SRC_BLKDEV_BLK_STS_RESV_CONFLICT
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
ZFS_AC_KERNEL_BLKDEV_GET_BY_PATH
ZFS_AC_KERNEL_BLKDEV_PUT
ZFS_AC_KERNEL_BLKDEV_REREAD_PART
ZFS_AC_KERNEL_BLKDEV_INVALIDATE_BDEV
ZFS_AC_KERNEL_BLKDEV_LOOKUP_BDEV
ZFS_AC_KERNEL_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE
ZFS_AC_KERNEL_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE
ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_BLKDEV_GET_ERESTARTSYS
ZFS_AC_KERNEL_BLKDEV_ISSUE_SECURE_ERASE
ZFS_AC_KERNEL_BLKDEV_BDEV_KOBJ
ZFS_AC_KERNEL_BLKDEV_PART_TO_DEV
+ ZFS_AC_KERNEL_BLKDEV_DISK_CHECK_MEDIA_CHANGE
+ ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT
])
diff --git a/sys/contrib/openzfs/config/kernel-vfs-extended-file_range.m4 b/sys/contrib/openzfs/config/kernel-vfs-extended-file_range.m4
new file mode 100644
index 000000000000..a2622313129e
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-vfs-extended-file_range.m4
@@ -0,0 +1,50 @@
+dnl #
+dnl # EL7 kernels have backported copy_file_range and clone_file_range and
+dnl # added them to an "extended" file_operations struct.
+dnl #
+dnl # We're testing for both functions in one test here, because they will
+dnl # only ever appear together and we don't want to match a similar method
+dnl # in some future vendor kernel.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_FILE_OPERATIONS_EXTEND], [
+ ZFS_LINUX_TEST_SRC([vfs_file_operations_extend], [
+ #include <linux/fs.h>
+
+ static ssize_t test_copy_file_range(struct file *src_file,
+ loff_t src_off, struct file *dst_file, loff_t dst_off,
+ size_t len, unsigned int flags) {
+ (void) src_file; (void) src_off;
+ (void) dst_file; (void) dst_off;
+ (void) len; (void) flags;
+ return (0);
+ }
+
+ static int test_clone_file_range(struct file *src_file,
+ loff_t src_off, struct file *dst_file, loff_t dst_off,
+ u64 len) {
+ (void) src_file; (void) src_off;
+ (void) dst_file; (void) dst_off;
+ (void) len;
+ return (0);
+ }
+
+ static const struct file_operations_extend
+ fops __attribute__ ((unused)) = {
+ .kabi_fops = {},
+ .copy_file_range = test_copy_file_range,
+ .clone_file_range = test_clone_file_range,
+ };
+ ],[])
+])
+AC_DEFUN([ZFS_AC_KERNEL_VFS_FILE_OPERATIONS_EXTEND], [
+ AC_MSG_CHECKING([whether file_operations_extend takes \
+.copy_file_range() and .clone_file_range()])
+ ZFS_LINUX_TEST_RESULT([vfs_file_operations_extend], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_VFS_FILE_OPERATIONS_EXTEND, 1,
+ [file_operations_extend takes .copy_file_range()
+ and .clone_file_range()])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
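When the define is present, a module registers its range handlers on the
extended struct, past the kABI fence; a hedged sketch (my_copy_file_range and
my_clone_file_range are hypothetical handlers with the signatures tested
above):

	#include <linux/fs.h>

	static ssize_t my_copy_file_range(struct file *, loff_t,
	    struct file *, loff_t, size_t, unsigned int);
	static int my_clone_file_range(struct file *, loff_t,
	    struct file *, loff_t, u64);

	#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
	static const struct file_operations_extend my_fops = {
		.kabi_fops = {},	/* the regular fops live here */
		.copy_file_range  = my_copy_file_range,
		.clone_file_range = my_clone_file_range,
	};
	#endif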
diff --git a/sys/contrib/openzfs/config/kernel-vfs-file_range.m4 b/sys/contrib/openzfs/config/kernel-vfs-file_range.m4
new file mode 100644
index 000000000000..cc96404d8bbe
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-vfs-file_range.m4
@@ -0,0 +1,164 @@
+dnl #
+dnl # The *_file_range APIs have a long history:
+dnl #
+dnl # 2.6.29: BTRFS_IOC_CLONE and BTRFS_IOC_CLONE_RANGE ioctl introduced
+dnl # 3.12: BTRFS_IOC_FILE_EXTENT_SAME ioctl introduced
+dnl #
+dnl # 4.5: copy_file_range() syscall introduced, added to VFS
+dnl # 4.5: BTRFS_IOC_CLONE and BTRFS_IOC_CLONE_RANGE renamed to FICLONE and
+dnl # FICLONERANGE, added to VFS as clone_file_range()
+dnl # 4.5: BTRFS_IOC_FILE_EXTENT_SAME renamed to FIDEDUPERANGE, added to VFS
+dnl # as dedupe_file_range()
+dnl #
+dnl # 4.20: VFS clone_file_range() and dedupe_file_range() replaced by
+dnl # remap_file_range()
+dnl #
+dnl # 5.3: VFS copy_file_range() expected to do its own fallback,
+dnl # generic_copy_file_range() added to support it
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_COPY_FILE_RANGE], [
+ ZFS_LINUX_TEST_SRC([vfs_copy_file_range], [
+ #include <linux/fs.h>
+
+ static ssize_t test_copy_file_range(struct file *src_file,
+ loff_t src_off, struct file *dst_file, loff_t dst_off,
+ size_t len, unsigned int flags) {
+ (void) src_file; (void) src_off;
+ (void) dst_file; (void) dst_off;
+ (void) len; (void) flags;
+ return (0);
+ }
+
+ static const struct file_operations
+ fops __attribute__ ((unused)) = {
+ .copy_file_range = test_copy_file_range,
+ };
+ ],[])
+])
+AC_DEFUN([ZFS_AC_KERNEL_VFS_COPY_FILE_RANGE], [
+ AC_MSG_CHECKING([whether fops->copy_file_range() is available])
+ ZFS_LINUX_TEST_RESULT([vfs_copy_file_range], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_VFS_COPY_FILE_RANGE, 1,
+ [fops->copy_file_range() is available])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_GENERIC_COPY_FILE_RANGE], [
+ ZFS_LINUX_TEST_SRC([generic_copy_file_range], [
+ #include <linux/fs.h>
+ ], [
+ struct file *src_file __attribute__ ((unused)) = NULL;
+ loff_t src_off __attribute__ ((unused)) = 0;
+ struct file *dst_file __attribute__ ((unused)) = NULL;
+ loff_t dst_off __attribute__ ((unused)) = 0;
+ size_t len __attribute__ ((unused)) = 0;
+ unsigned int flags __attribute__ ((unused)) = 0;
+ generic_copy_file_range(src_file, src_off, dst_file, dst_off,
+ len, flags);
+ ])
+])
+AC_DEFUN([ZFS_AC_KERNEL_VFS_GENERIC_COPY_FILE_RANGE], [
+ AC_MSG_CHECKING([whether generic_copy_file_range() is available])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([generic_copy_file_range],
+ [generic_copy_file_range], [fs/read_write.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_VFS_GENERIC_COPY_FILE_RANGE, 1,
+ [generic_copy_file_range() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_CLONE_FILE_RANGE], [
+ ZFS_LINUX_TEST_SRC([vfs_clone_file_range], [
+ #include <linux/fs.h>
+
+ static int test_clone_file_range(struct file *src_file,
+ loff_t src_off, struct file *dst_file, loff_t dst_off,
+ u64 len) {
+ (void) src_file; (void) src_off;
+ (void) dst_file; (void) dst_off;
+ (void) len;
+ return (0);
+ }
+
+ static const struct file_operations
+ fops __attribute__ ((unused)) = {
+ .clone_file_range = test_clone_file_range,
+ };
+ ],[])
+])
+AC_DEFUN([ZFS_AC_KERNEL_VFS_CLONE_FILE_RANGE], [
+ AC_MSG_CHECKING([whether fops->clone_file_range() is available])
+ ZFS_LINUX_TEST_RESULT([vfs_clone_file_range], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_VFS_CLONE_FILE_RANGE, 1,
+ [fops->clone_file_range() is available])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_DEDUPE_FILE_RANGE], [
+ ZFS_LINUX_TEST_SRC([vfs_dedupe_file_range], [
+ #include <linux/fs.h>
+
+ static int test_dedupe_file_range(struct file *src_file,
+ loff_t src_off, struct file *dst_file, loff_t dst_off,
+ u64 len) {
+ (void) src_file; (void) src_off;
+ (void) dst_file; (void) dst_off;
+ (void) len;
+ return (0);
+ }
+
+ static const struct file_operations
+ fops __attribute__ ((unused)) = {
+ .dedupe_file_range = test_dedupe_file_range,
+ };
+ ],[])
+])
+AC_DEFUN([ZFS_AC_KERNEL_VFS_DEDUPE_FILE_RANGE], [
+ AC_MSG_CHECKING([whether fops->dedupe_file_range() is available])
+ ZFS_LINUX_TEST_RESULT([vfs_dedupe_file_range], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_VFS_DEDUPE_FILE_RANGE, 1,
+ [fops->dedupe_file_range() is available])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_REMAP_FILE_RANGE], [
+ ZFS_LINUX_TEST_SRC([vfs_remap_file_range], [
+ #include <linux/fs.h>
+
+ static loff_t test_remap_file_range(struct file *src_file,
+ loff_t src_off, struct file *dst_file, loff_t dst_off,
+ loff_t len, unsigned int flags) {
+ (void) src_file; (void) src_off;
+ (void) dst_file; (void) dst_off;
+ (void) len; (void) flags;
+ return (0);
+ }
+
+ static const struct file_operations
+ fops __attribute__ ((unused)) = {
+ .remap_file_range = test_remap_file_range,
+ };
+ ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VFS_REMAP_FILE_RANGE], [
+ AC_MSG_CHECKING([whether fops->remap_file_range() is available])
+ ZFS_LINUX_TEST_RESULT([vfs_remap_file_range], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_VFS_REMAP_FILE_RANGE, 1,
+ [fops->remap_file_range() is available])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
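Putting the history above into practice, a filesystem typically wires up
whichever member the running kernel understands; a sketch with hypothetical
handlers:

	#include <linux/fs.h>

	static loff_t my_remap_file_range(struct file *, loff_t,
	    struct file *, loff_t, loff_t, unsigned int);
	static int my_clone_file_range(struct file *, loff_t,
	    struct file *, loff_t, u64);

	static const struct file_operations my_fops = {
	#ifdef HAVE_VFS_REMAP_FILE_RANGE
		.remap_file_range = my_remap_file_range,	/* 4.20+ */
	#else
		.clone_file_range = my_clone_file_range,	/* 4.5 - 4.19 */
	#endif
	};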
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index cb7e736c9a43..1487fa2e7793 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,1012 +1,1024 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_CONFIG_DEFINED
ZFS_AC_MODULE_SYMVERS
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once, the compilation can be done in parallel, significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
ZFS_AC_KERNEL_SRC_FADVISE
ZFS_AC_KERNEL_SRC_GENERIC_FADVISE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_GENHD_FLAGS
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_SETATTR
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_PERMISSION
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_DENTRY_ALIAS_D_U
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_SRC_VFS_READ_FOLIO
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_READPAGES
ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
+ ZFS_AC_KERNEL_SRC_VFS_COPY_FILE_RANGE
+ ZFS_AC_KERNEL_SRC_VFS_GENERIC_COPY_FILE_RANGE
+ ZFS_AC_KERNEL_SRC_VFS_REMAP_FILE_RANGE
+ ZFS_AC_KERNEL_SRC_VFS_CLONE_FILE_RANGE
+ ZFS_AC_KERNEL_SRC_VFS_DEDUPE_FILE_RANGE
+ ZFS_AC_KERNEL_SRC_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SYSFS
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_SRC_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_SRC_ADD_DISK
ZFS_AC_KERNEL_SRC_KTHREAD
ZFS_AC_KERNEL_SRC_ZERO_PAGE
ZFS_AC_KERNEL_SRC___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_SRC_USER_NS_COMMON_INUM
ZFS_AC_KERNEL_SRC_IDMAP_MNT_API
ZFS_AC_KERNEL_SRC_IATTR_VFSID
ZFS_AC_KERNEL_SRC_FILEMAP
ZFS_AC_KERNEL_SRC_WRITEPAGE_T
ZFS_AC_KERNEL_SRC_RECLAIMED
case "$host_cpu" in
powerpc*)
ZFS_AC_KERNEL_SRC_CPU_HAS_FEATURE
ZFS_AC_KERNEL_SRC_FLUSH_DCACHE_PAGE
;;
esac
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
ZFS_AC_KERNEL_FADVISE
ZFS_AC_KERNEL_GENERIC_FADVISE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_GENHD_FLAGS
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_SETATTR
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_PERMISSION
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_DENTRY_ALIAS_D_U
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_VFS_READ_FOLIO
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_READPAGES
ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
+ ZFS_AC_KERNEL_VFS_COPY_FILE_RANGE
+ ZFS_AC_KERNEL_VFS_GENERIC_COPY_FILE_RANGE
+ ZFS_AC_KERNEL_VFS_REMAP_FILE_RANGE
+ ZFS_AC_KERNEL_VFS_CLONE_FILE_RANGE
+ ZFS_AC_KERNEL_VFS_DEDUPE_FILE_RANGE
+ ZFS_AC_KERNEL_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SYSFS
ZFS_AC_KERNEL_SET_SPECIAL_STATE
ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_ADD_DISK
ZFS_AC_KERNEL_KTHREAD
ZFS_AC_KERNEL_ZERO_PAGE
ZFS_AC_KERNEL___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_USER_NS_COMMON_INUM
ZFS_AC_KERNEL_IDMAP_MNT_API
ZFS_AC_KERNEL_IATTR_VFSID
ZFS_AC_KERNEL_FILEMAP
ZFS_AC_KERNEL_WRITEPAGE_T
ZFS_AC_KERNEL_RECLAIMED
case "$host_cpu" in
powerpc*)
ZFS_AC_KERNEL_CPU_HAS_FEATURE
ZFS_AC_KERNEL_FLUSH_DCACHE_PAGE
;;
esac
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
dnl # Most modern Linux distributions have separate locations for bare
dnl # source (source) and prebuilt (build) files. Additionally, there are
dnl # `source` and `build` symlinks in `/lib/modules/$(KERNEL_VERSION)`
dnl # pointing to them. The directory search order is now:
dnl #
dnl # - `configure` command line values if both `--with-linux` and
dnl # `--with-linux-obj` were defined
dnl #
dnl # - If only `--with-linux` was defined, `--with-linux-obj` is assumed
dnl # to have the same value as `--with-linux`
dnl #
dnl # - If neither `--with-linux` nor `--with-linux-obj` were defined
dnl # autodetection is used:
dnl #
dnl # - `/lib/modules/$(uname -r)/{source,build}` respectively, if they exist.
dnl #
dnl # - If only `/lib/modules/$(uname -r)/build` exists, it is assumed
dnl # to be both source and build directory.
dnl #
dnl # - The first directory in `/lib/modules` with the highest version
dnl # number according to `sort -V` which contains both `source` and
dnl # `build` symlinks/directories. If a module directory contains only
dnl # the `build` component, it is assumed to be both source and build
dnl # directory.
dnl #
dnl # - Last resort: the first directory matching `/usr/src/kernels/*`
dnl # and `/usr/src/linux-*` with the highest version number according
dnl # to `sort -V` is assumed to be both source and build directory.
dnl #
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
AC_MSG_CHECKING([kernel source and build directories])
AS_IF([test -n "$kernelsrc" && test -z "$kernelbuild"], [
kernelbuild="$kernelsrc"
], [test -z "$kernelsrc"], [
AS_IF([test -e "/lib/modules/$(uname -r)/source" && \
test -e "/lib/modules/$(uname -r)/build"], [
src="/lib/modules/$(uname -r)/source"
build="/lib/modules/$(uname -r)/build"
], [test -e "/lib/modules/$(uname -r)/build"], [
build="/lib/modules/$(uname -r)/build"
src="$build"
], [
src=
for d in $(ls -1d /lib/modules/* 2>/dev/null | sort -Vr); do
if test -e "$d/source" && test -e "$d/build"; then
src="$d/source"
build="$d/build"
break
fi
if test -e "$d/build"; then
src="$d/build"
build="$d/build"
break
fi
done
# the least reliable method
if test -z "$src"; then
src=$(ls -1d /usr/src/kernels/* /usr/src/linux-* \
2>/dev/null | grep -v obj | sort -Vr | head -1)
build="$src"
fi
])
AS_IF([test -n "$src" && test -e "$src"], [
kernelsrc=$(readlink -e "$src")
], [
kernelsrc="[Not found]"
])
AS_IF([test -n "$build" && test -e "$build"], [
kernelbuild=$(readlink -e "$build")
], [
kernelbuild="[Not found]"
])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
AC_MSG_RESULT([done])
AC_MSG_CHECKING([kernel source directory])
AC_MSG_RESULT([$kernelsrc])
AC_MSG_CHECKING([kernel build directory])
AC_MSG_RESULT([$kernelbuild])
AS_IF([test ! -d "$kernelsrc" || test ! -d "$kernelbuild"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
*** location of the kernel source and build with the '--with-linux=PATH' and
*** '--with-linux-obj=PATH' options respectively.])
])
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && grep -qF UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && grep -qF UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && grep -qF UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
])
dnl #
dnl # Detect the QAT module to be built against, QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Then the data written to this ZFS pool is compressed by the QAT
dnl # accelerator automatically, and decompressed by QAT when read from
dnl # the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and exists])
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
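For instance, the bdev_whole test above expands to roughly this conftest
source, with the license supplied by ZFS_LINUX_TEST_SRC ("Dual BSD/GPL") and
the ZFS_META_* values coming from confdefs.h:

	#include <linux/module.h>
	#include <linux/blkdev.h>

	int
	main (void)
	{
		struct block_device *bdev = NULL;
		bdev = bdev_whole(bdev);
		;
		return 0;
	}

	MODULE_DESCRIPTION("conftest");
	MODULE_AUTHOR(ZFS_META_AUTHOR);
	MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
	MODULE_LICENSE("Dual BSD/GPL");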
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
AC_ARG_VAR([KERNEL_CC], [C compiler for
building kernel modules])
AC_ARG_VAR([KERNEL_LD], [Linker for
building kernel modules])
AC_ARG_VAR([KERNEL_LLVM], [Binary option to
build kernel modules with LLVM/CLANG toolchain])
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC}
${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM}
CONFIG_MODULES=y CFLAGS_MODULE=-DCONFIG_MODULES
-C $LINUX_OBJ $ARCH_UM M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin)
dnl # fake the linking step and artificially create the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. because all of the test cases are compiled in parallel, they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4], [$5])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4], [$5])
])
])
dnl #
dnl # AS_VERSION_COMPARE_LE
dnl # like AS_VERSION_COMPARE, but runs $3 if (and only if) $1 <= $2
dnl # AS_VERSION_COMPARE_LE (version-1, version-2, [action-if-less-or-equal], [action-if-greater])
dnl #
AC_DEFUN([AS_VERSION_COMPARE_LE], [
AS_VERSION_COMPARE([$1], [$2], [$3], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_REQUIRE_API
dnl # like ZFS_LINUX_TEST_ERROR, except it only fails if the kernel is
dnl # at least the specified version.
dnl #
AC_DEFUN([ZFS_LINUX_REQUIRE_API], [
AS_VERSION_COMPARE_LE([$2], [$kernsrcver], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected. This
*** interface is expected for kernels version "$2" and above.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications. Newer kernels may have incompatible
*** APIs.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
], [
AC_MSG_RESULT(no)
])
])
diff --git a/sys/contrib/openzfs/contrib/debian/changelog b/sys/contrib/openzfs/contrib/debian/changelog
index 6273d603834a..ba42ea59fa8d 100644
--- a/sys/contrib/openzfs/contrib/debian/changelog
+++ b/sys/contrib/openzfs/contrib/debian/changelog
@@ -1,7 +1,13 @@
+openzfs-linux (2.2.0-0) unstable; urgency=low
+
+ * OpenZFS 2.2.0 is tagged.
+
+ -- Umer Saleem <usaleem@ixsystems.com> Tue, 25 Jul 2023 15:00:00 +0500
+
openzfs-linux (2.1.99-1) unstable; urgency=low
* Integrate minimally modified Debian packaging from ZFS on Linux
(source: https://salsa.debian.org/zfsonlinux-team/zfs)
* This packaging is a fork of Debian zfs-linux 2.1.6-2 release.
-- Umer Saleem <usaleem@ixsystems.com> Fri, 11 Oct 2022 15:00:00 -0400
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install b/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install
index cafcfdc0e15b..b3afef50dbd4 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install
@@ -1,15 +1,13 @@
-sbin/zinject
sbin/ztest
usr/bin/raidz_test
usr/share/man/man1/raidz_test.1
usr/share/man/man1/test-runner.1
usr/share/man/man1/ztest.1
-usr/share/man/man8/zinject.8
usr/share/zfs/common.sh
usr/share/zfs/runfiles/
usr/share/zfs/test-runner
usr/share/zfs/zfs-tests.sh
usr/share/zfs/zfs-tests/
usr/share/zfs/zfs.sh
usr/share/zfs/zimport.sh
usr/share/zfs/zloop.sh
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install b/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install
index 49f0ec0d5d92..301d8f67b3af 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install
@@ -1,135 +1,135 @@
etc/default/zfs
etc/zfs/zfs-functions
etc/zfs/zpool.d/
etc/bash_completion.d/zfs
lib/systemd/system-generators/
lib/systemd/system-preset/
lib/systemd/system/zfs-import-cache.service
lib/systemd/system/zfs-import-scan.service
lib/systemd/system/zfs-import.target
lib/systemd/system/zfs-load-key.service
lib/systemd/system/zfs-mount.service
lib/systemd/system/zfs-scrub-monthly@.timer
lib/systemd/system/zfs-scrub-weekly@.timer
lib/systemd/system/zfs-scrub@.service
lib/systemd/system/zfs-trim-monthly@.timer
lib/systemd/system/zfs-trim-weekly@.timer
lib/systemd/system/zfs-trim@.service
lib/systemd/system/zfs-share.service
lib/systemd/system/zfs-volume-wait.service
lib/systemd/system/zfs-volumes.target
lib/systemd/system/zfs.target
lib/udev/
sbin/fsck.zfs
sbin/mount.zfs
sbin/zdb
sbin/zfs
sbin/zfs_ids_to_path
sbin/zgenhostid
sbin/zhack
+sbin/zinject
sbin/zpool
sbin/zstream
sbin/zstreamdump
usr/bin/zvol_wait
usr/lib/modules-load.d/ lib/
usr/lib/zfs-linux/zpool.d/
usr/lib/zfs-linux/zpool_influxdb
usr/sbin/arc_summary
usr/sbin/arcstat
usr/sbin/dbufstat
usr/sbin/zilstat
usr/share/zfs/compatibility.d/
usr/share/bash-completion/completions
usr/share/man/man1/arcstat.1
usr/share/man/man1/zhack.1
usr/share/man/man1/zvol_wait.1
usr/share/man/man5/
usr/share/man/man8/fsck.zfs.8
usr/share/man/man8/mount.zfs.8
usr/share/man/man8/vdev_id.8
usr/share/man/man8/zdb.8
usr/share/man/man8/zfs-allow.8
usr/share/man/man8/zfs-bookmark.8
usr/share/man/man8/zfs-change-key.8
usr/share/man/man8/zfs-clone.8
usr/share/man/man8/zfs-create.8
usr/share/man/man8/zfs-destroy.8
usr/share/man/man8/zfs-diff.8
usr/share/man/man8/zfs-get.8
usr/share/man/man8/zfs-groupspace.8
usr/share/man/man8/zfs-hold.8
usr/share/man/man8/zfs-inherit.8
-usr/share/man/man8/zfs-jail.8
usr/share/man/man8/zfs-list.8
usr/share/man/man8/zfs-load-key.8
usr/share/man/man8/zfs-mount-generator.8
usr/share/man/man8/zfs-mount.8
usr/share/man/man8/zfs-program.8
usr/share/man/man8/zfs-project.8
usr/share/man/man8/zfs-projectspace.8
usr/share/man/man8/zfs-promote.8
usr/share/man/man8/zfs-receive.8
usr/share/man/man8/zfs-recv.8
usr/share/man/man8/zfs-redact.8
usr/share/man/man8/zfs-release.8
usr/share/man/man8/zfs-rename.8
usr/share/man/man8/zfs-rollback.8
usr/share/man/man8/zfs-send.8
usr/share/man/man8/zfs-set.8
usr/share/man/man8/zfs-share.8
usr/share/man/man8/zfs-snapshot.8
usr/share/man/man8/zfs-unallow.8
-usr/share/man/man8/zfs-unjail.8
usr/share/man/man8/zfs-unload-key.8
usr/share/man/man8/zfs-unmount.8
usr/share/man/man8/zfs-unzone.8
usr/share/man/man8/zfs-upgrade.8
usr/share/man/man8/zfs-userspace.8
usr/share/man/man8/zfs-wait.8
usr/share/man/man8/zfs-zone.8
usr/share/man/man8/zfs.8
usr/share/man/man8/zfs_ids_to_path.8
usr/share/man/man7/zfsconcepts.7
usr/share/man/man7/zfsprops.7
usr/share/man/man8/zgenhostid.8
+usr/share/man/man8/zinject.8
usr/share/man/man8/zpool-add.8
usr/share/man/man8/zpool-attach.8
usr/share/man/man8/zpool-checkpoint.8
usr/share/man/man8/zpool-clear.8
usr/share/man/man8/zpool-create.8
usr/share/man/man8/zpool-destroy.8
usr/share/man/man8/zpool-detach.8
usr/share/man/man8/zpool-events.8
usr/share/man/man8/zpool-export.8
usr/share/man/man8/zpool-get.8
usr/share/man/man8/zpool-history.8
usr/share/man/man8/zpool-import.8
usr/share/man/man8/zpool-initialize.8
usr/share/man/man8/zpool-iostat.8
usr/share/man/man8/zpool-labelclear.8
usr/share/man/man8/zpool-list.8
usr/share/man/man8/zpool-offline.8
usr/share/man/man8/zpool-online.8
usr/share/man/man8/zpool-reguid.8
usr/share/man/man8/zpool-remove.8
usr/share/man/man8/zpool-reopen.8
usr/share/man/man8/zpool-replace.8
usr/share/man/man8/zpool-resilver.8
usr/share/man/man8/zpool-scrub.8
usr/share/man/man8/zpool-set.8
usr/share/man/man8/zpool-split.8
usr/share/man/man8/zpool-status.8
usr/share/man/man8/zpool-sync.8
usr/share/man/man8/zpool-trim.8
usr/share/man/man8/zpool-upgrade.8
usr/share/man/man8/zpool-wait.8
usr/share/man/man8/zpool.8
usr/share/man/man7/vdevprops.7
usr/share/man/man7/zpoolconcepts.7
usr/share/man/man7/zpoolprops.7
usr/share/man/man8/zstream.8
usr/share/man/man8/zstreamdump.8
usr/share/man/man4/spl.4
usr/share/man/man4/zfs.4
usr/share/man/man7/zpool-features.7
usr/share/man/man8/zpool_influxdb.8
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in
index 7ebab4c1a58d..fe362b930bf5 100644
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in
@@ -1,23 +1,24 @@
[Unit]
Description=Set BOOTFS and BOOTFSFLAGS environment variables for dracut
DefaultDependencies=no
After=zfs-import-cache.service
After=zfs-import-scan.service
Before=zfs-import.target
[Service]
Type=oneshot
ExecStart=/bin/sh -c ' \
. /lib/dracut-zfs-lib.sh; \
decode_root_args || exit 0; \
[ "$root" = "zfs:AUTO" ] && root="$(@sbindir@/zpool list -H -o bootfs | grep -m1 -vFx -)"; \
rootflags="$(getarg rootflags=)"; \
- case ",$rootflags," in \
- *,zfsutil,*) ;; \
- ,,) rootflags=zfsutil ;; \
- *) rootflags="zfsutil,$rootflags" ;; \
- esac; \
+ [ "$(@sbindir@/zfs get -H -o value mountpoint "$root")" = legacy ] || \
+ case ",$rootflags," in \
+ *,zfsutil,*) ;; \
+ ,,) rootflags=zfsutil ;; \
+ *) rootflags="zfsutil,$rootflags" ;; \
+ esac; \
exec systemctl set-environment BOOTFS="$root" BOOTFSFLAGS="$rootflags"'
[Install]
WantedBy=zfs-import.target
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in
index 68fdcb1f323e..12d8ac703e37 100644
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in
@@ -1,13 +1,13 @@
[Unit]
Description=Rollback bootfs just before it is mounted
Requisite=zfs-import.target
After=zfs-import.target dracut-pre-mount.service zfs-snapshot-bootfs.service
-Before=dracut-mount.service
+Before=dracut-mount.service sysroot.mount
DefaultDependencies=no
ConditionKernelCommandLine=bootfs.rollback
ConditionEnvironment=BOOTFS
[Service]
Type=oneshot
ExecStart=/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.rollback)"; exec @sbindir@/zfs rollback -Rf "$BOOTFS@${SNAPNAME:-%v}"'
RemainAfterExit=yes
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index a7037e3e6266..fa05b7921bb5 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -1,1031 +1,1032 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright Joyent, Inc.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2016, Intel Corporation.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
#ifndef _LIBZFS_H
#define _LIBZFS_H extern __attribute__((visibility("default")))
#include <assert.h>
#include <libshare.h>
#include <libnvpair.h>
#include <sys/mnttab.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/avl.h>
#include <libzfs_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Miscellaneous ZFS constants
*/
#define ZFS_MAXPROPLEN MAXPATHLEN
#define ZPOOL_MAXPROPLEN MAXPATHLEN
/*
* libzfs errors
*/
typedef enum zfs_error {
EZFS_SUCCESS = 0, /* no error -- success */
EZFS_NOMEM = 2000, /* out of memory */
EZFS_BADPROP, /* invalid property value */
EZFS_PROPREADONLY, /* cannot set readonly property */
EZFS_PROPTYPE, /* property does not apply to dataset type */
EZFS_PROPNONINHERIT, /* property is not inheritable */
EZFS_PROPSPACE, /* bad quota or reservation */
EZFS_BADTYPE, /* dataset is not of appropriate type */
EZFS_BUSY, /* pool or dataset is busy */
EZFS_EXISTS, /* pool or dataset already exists */
EZFS_NOENT, /* no such pool or dataset */
EZFS_BADSTREAM, /* bad backup stream */
EZFS_DSREADONLY, /* dataset is readonly */
EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */
EZFS_INVALIDNAME, /* invalid dataset name */
EZFS_BADRESTORE, /* unable to restore to destination */
EZFS_BADBACKUP, /* backup failed */
EZFS_BADTARGET, /* bad attach/detach/replace target */
EZFS_NODEVICE, /* no such device in pool */
EZFS_BADDEV, /* invalid device to add */
EZFS_NOREPLICAS, /* no valid replicas */
EZFS_RESILVERING, /* resilvering (healing reconstruction) */
EZFS_BADVERSION, /* unsupported version */
EZFS_POOLUNAVAIL, /* pool is currently unavailable */
EZFS_DEVOVERFLOW, /* too many devices in one vdev */
EZFS_BADPATH, /* must be an absolute path */
EZFS_CROSSTARGET, /* rename or clone across pool or dataset */
EZFS_ZONED, /* used improperly in local zone */
EZFS_MOUNTFAILED, /* failed to mount dataset */
EZFS_UMOUNTFAILED, /* failed to unmount dataset */
EZFS_UNSHARENFSFAILED, /* failed to unshare over nfs */
EZFS_SHARENFSFAILED, /* failed to share over nfs */
EZFS_PERM, /* permission denied */
EZFS_NOSPC, /* out of space */
EZFS_FAULT, /* bad address */
EZFS_IO, /* I/O error */
EZFS_INTR, /* signal received */
EZFS_ISSPARE, /* device is a hot spare */
EZFS_INVALCONFIG, /* invalid vdev configuration */
EZFS_RECURSIVE, /* recursive dependency */
EZFS_NOHISTORY, /* no history object */
EZFS_POOLPROPS, /* couldn't retrieve pool props */
EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
EZFS_NAMETOOLONG, /* dataset name is too long */
EZFS_OPENFAILED, /* open of device failed */
EZFS_NOCAP, /* couldn't get capacity */
EZFS_LABELFAILED, /* write of label failed */
EZFS_BADWHO, /* invalid permission who */
EZFS_BADPERM, /* invalid permission */
EZFS_BADPERMSET, /* invalid permission set name */
EZFS_NODELEGATION, /* delegated administration is disabled */
EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */
EZFS_SHARESMBFAILED, /* failed to share over smb */
EZFS_BADCACHE, /* bad cache file */
EZFS_ISL2CACHE, /* device is for the level 2 ARC */
EZFS_VDEVNOTSUP, /* unsupported vdev type */
EZFS_NOTSUP, /* ops not supported on this dataset */
EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */
EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */
EZFS_REFTAG_RELE, /* snapshot release: tag not found */
EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */
EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */
EZFS_PIPEFAILED, /* pipe create failed */
EZFS_THREADCREATEFAILED, /* thread create failed */
EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */
EZFS_SCRUBBING, /* currently scrubbing */
EZFS_ERRORSCRUBBING, /* currently error scrubbing */
EZFS_ERRORSCRUB_PAUSED, /* error scrub currently paused */
EZFS_NO_SCRUB, /* no active scrub */
EZFS_DIFF, /* general failure of zfs diff */
EZFS_DIFFDATA, /* bad zfs diff data */
EZFS_POOLREADONLY, /* pool is in read-only mode */
EZFS_SCRUB_PAUSED, /* scrub currently paused */
EZFS_SCRUB_PAUSED_TO_CANCEL, /* scrub currently paused, cancel requested */
EZFS_ACTIVE_POOL, /* pool is imported on a different system */
EZFS_CRYPTOFAILED, /* failed to setup encryption */
EZFS_NO_PENDING, /* cannot cancel, no operation is pending */
EZFS_CHECKPOINT_EXISTS, /* checkpoint exists */
EZFS_DISCARDING_CHECKPOINT, /* currently discarding a checkpoint */
EZFS_NO_CHECKPOINT, /* pool has no checkpoint */
EZFS_DEVRM_IN_PROGRESS, /* a device is currently being removed */
EZFS_VDEV_TOO_BIG, /* a device is too big to be used */
EZFS_IOC_NOTSUPPORTED, /* operation not supported by zfs module */
EZFS_TOOMANY, /* argument list too long */
EZFS_INITIALIZING, /* currently initializing */
EZFS_NO_INITIALIZE, /* no active initialize */
EZFS_WRONG_PARENT, /* invalid parent dataset (e.g. ZVOL) */
EZFS_TRIMMING, /* currently trimming */
EZFS_NO_TRIM, /* no active trim */
EZFS_TRIM_NOTSUP, /* device does not support trim */
EZFS_NO_RESILVER_DEFER, /* pool doesn't support resilver_defer */
EZFS_EXPORT_IN_PROGRESS, /* currently exporting the pool */
EZFS_REBUILDING, /* resilvering (sequential reconstruction) */
EZFS_VDEV_NOTSUP, /* ops not supported for this type of vdev */
EZFS_NOT_USER_NAMESPACE, /* a file is not a user namespace */
EZFS_CKSUM, /* insufficient replicas */
EZFS_RESUME_EXISTS, /* Resume on existing dataset without force */
+ EZFS_SHAREFAILED, /* filesystem share failed */
EZFS_UNKNOWN
} zfs_error_t;
/*
* The following data structures are all part
* of the zfs_allow_t data structure which is
* used for printing 'allow' permissions.
* It is a linked list of zfs_allow_t's which
* then contain AVL trees of user/group/sets/...
* entries; each entry in those trees in turn has
* an AVL tree of the permissions it belongs to and
* whether they are local, descendent, or local+descendent
* permissions. The AVL trees are used primarily for
* sorting purposes, but also so that we can quickly find
* a given user and/or permission.
*/
typedef struct zfs_perm_node {
avl_node_t z_node;
char z_pname[MAXPATHLEN];
} zfs_perm_node_t;
typedef struct zfs_allow_node {
avl_node_t z_node;
char z_key[MAXPATHLEN]; /* name, such as joe */
avl_tree_t z_localdescend; /* local+descendent perms */
avl_tree_t z_local; /* local permissions */
avl_tree_t z_descend; /* descendent permissions */
} zfs_allow_node_t;
typedef struct zfs_allow {
struct zfs_allow *z_next;
char z_setpoint[MAXPATHLEN];
avl_tree_t z_sets;
avl_tree_t z_crperms;
avl_tree_t z_user;
avl_tree_t z_group;
avl_tree_t z_everyone;
} zfs_allow_t;
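/*
 * Illustrative sketch (not part of the original header): walking the
 * zfs_allow_t chain and one of its per-name AVL trees, assuming the
 * avl_first()/AVL_NEXT() iterators from sys/avl.h and a hypothetical
 * "allow" list head.
 *
 *	for (zfs_allow_t *za = allow; za != NULL; za = za->z_next) {
 *		(void) printf("---- %s ----\n", za->z_setpoint);
 *		for (zfs_allow_node_t *n = avl_first(&za->z_user);
 *		    n != NULL; n = AVL_NEXT(&za->z_user, n))
 *			(void) printf("user %s\n", n->z_key);
 *	}
 */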
/*
* Basic handle types
*/
typedef struct zfs_handle zfs_handle_t;
typedef struct zpool_handle zpool_handle_t;
typedef struct libzfs_handle libzfs_handle_t;
_LIBZFS_H int zpool_wait(zpool_handle_t *, zpool_wait_activity_t);
_LIBZFS_H int zpool_wait_status(zpool_handle_t *, zpool_wait_activity_t,
boolean_t *, boolean_t *);
/*
* Library initialization
*/
_LIBZFS_H libzfs_handle_t *libzfs_init(void);
_LIBZFS_H void libzfs_fini(libzfs_handle_t *);
_LIBZFS_H libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
_LIBZFS_H libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
_LIBZFS_H void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
_LIBZFS_H void zfs_save_arguments(int argc, char **, char *, int);
_LIBZFS_H int zpool_log_history(libzfs_handle_t *, const char *);
_LIBZFS_H int libzfs_errno(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_init(int);
_LIBZFS_H const char *libzfs_error_action(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_description(libzfs_handle_t *);
_LIBZFS_H int zfs_standard_error(libzfs_handle_t *, int, const char *);
_LIBZFS_H void libzfs_mnttab_init(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_fini(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t);
_LIBZFS_H int libzfs_mnttab_find(libzfs_handle_t *, const char *,
struct mnttab *);
_LIBZFS_H void libzfs_mnttab_add(libzfs_handle_t *, const char *,
const char *, const char *);
_LIBZFS_H void libzfs_mnttab_remove(libzfs_handle_t *, const char *);
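/*
 * Illustrative sketch (not part of the original header): typical
 * library setup and teardown around the zpool_*()/zfs_*() calls.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	if (hdl == NULL)
 *		return (1);		// errno identifies the failure
 *	libzfs_print_on_error(hdl, B_TRUE);
 *	... use the handle here ...
 *	libzfs_fini(hdl);
 */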
/*
* Basic handle functions
*/
_LIBZFS_H zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
_LIBZFS_H zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
_LIBZFS_H void zpool_close(zpool_handle_t *);
_LIBZFS_H const char *zpool_get_name(zpool_handle_t *);
_LIBZFS_H int zpool_get_state(zpool_handle_t *);
_LIBZFS_H const char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
_LIBZFS_H const char *zpool_pool_state_to_name(pool_state_t);
_LIBZFS_H void zpool_free_handles(libzfs_handle_t *);
/*
* Iterate over all active pools in the system.
*/
typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
_LIBZFS_H int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
_LIBZFS_H boolean_t zpool_skip_pool(const char *);
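/*
 * Illustrative sketch (not part of the original header): printing the
 * name of every active pool.  The callback matches zpool_iter_f; a
 * nonzero return stops the iteration, and in this sketch the callback
 * closes each handle it receives.
 *
 *	static int
 *	print_pool_cb(zpool_handle_t *zhp, void *data)
 *	{
 *		(void) printf("%s\n", zpool_get_name(zhp));
 *		zpool_close(zhp);
 *		return (0);
 *	}
 *
 *	(void) zpool_iter(hdl, print_pool_cb, NULL);
 */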
/*
* Functions to create and destroy pools
*/
_LIBZFS_H int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
nvlist_t *, nvlist_t *);
_LIBZFS_H int zpool_destroy(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_add(zpool_handle_t *, nvlist_t *);
typedef struct splitflags {
/* do not split, but return the config that would be split off */
unsigned int dryrun : 1;
/* after splitting, import the pool */
unsigned int import : 1;
int name_flags;
} splitflags_t;
typedef struct trimflags {
/* requested vdevs are for the entire pool */
boolean_t fullpool;
/* request a secure trim, requires support from device */
boolean_t secure;
/* after starting trim, block until trim completes */
boolean_t wait;
/* trim at the requested rate in bytes/second */
uint64_t rate;
} trimflags_t;
/*
* Functions to manipulate pool and vdev state
*/
_LIBZFS_H int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
_LIBZFS_H int zpool_initialize(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_initialize_wait(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_trim(zpool_handle_t *, pool_trim_func_t, nvlist_t *,
trimflags_t *);
_LIBZFS_H int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zpool_reguid(zpool_handle_t *);
_LIBZFS_H int zpool_reopen_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_sync_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_vdev_online(zpool_handle_t *, const char *, int,
vdev_state_t *);
_LIBZFS_H int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
_LIBZFS_H int zpool_vdev_attach(zpool_handle_t *, const char *,
const char *, nvlist_t *, int, boolean_t);
_LIBZFS_H int zpool_vdev_detach(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove_cancel(zpool_handle_t *);
_LIBZFS_H int zpool_vdev_indirect_size(zpool_handle_t *, const char *,
uint64_t *);
_LIBZFS_H int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **,
nvlist_t *, splitflags_t);
_LIBZFS_H int zpool_vdev_remove_wanted(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_clear(zpool_handle_t *, uint64_t);
_LIBZFS_H nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
boolean_t *, boolean_t *);
_LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
const char *);
_LIBZFS_H uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp,
const char *path);
_LIBZFS_H const char *zpool_get_state_str(zpool_handle_t *);
/*
* Functions to manage pool properties
*/
_LIBZFS_H int zpool_set_prop(zpool_handle_t *, const char *, const char *);
_LIBZFS_H int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
size_t proplen, zprop_source_t *, boolean_t literal);
_LIBZFS_H int zpool_get_userprop(zpool_handle_t *, const char *, char *,
size_t proplen, zprop_source_t *);
_LIBZFS_H uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
zprop_source_t *);
_LIBZFS_H int zpool_props_refresh(zpool_handle_t *);
_LIBZFS_H const char *zpool_prop_to_name(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_values(zpool_prop_t);
/*
* Functions to manage vdev properties
*/
_LIBZFS_H int zpool_get_vdev_prop_value(nvlist_t *, vdev_prop_t, char *, char *,
size_t, zprop_source_t *, boolean_t);
_LIBZFS_H int zpool_get_vdev_prop(zpool_handle_t *, const char *, vdev_prop_t,
char *, char *, size_t, zprop_source_t *, boolean_t);
_LIBZFS_H int zpool_get_all_vdev_props(zpool_handle_t *, const char *,
nvlist_t **);
_LIBZFS_H int zpool_set_vdev_prop(zpool_handle_t *, const char *, const char *,
const char *);
_LIBZFS_H const char *vdev_prop_to_name(vdev_prop_t);
_LIBZFS_H const char *vdev_prop_values(vdev_prop_t);
_LIBZFS_H boolean_t vdev_prop_user(const char *name);
_LIBZFS_H const char *vdev_prop_column_name(vdev_prop_t);
_LIBZFS_H boolean_t vdev_prop_align_right(vdev_prop_t);
/*
* Pool health statistics.
*/
typedef enum {
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
* This must be kept in sync with the zfs_msgid_table in
* lib/libzfs/libzfs_status.c.
*/
ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */
ZPOOL_STATUS_HOSTID_ACTIVE, /* currently active on another system */
ZPOOL_STATUS_HOSTID_REQUIRED, /* multihost=on and hostid=0 */
ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */
ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
ZPOOL_STATUS_IO_FAILURE_MMP, /* failed MMP, failmode not 'panic' */
ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */
ZPOOL_STATUS_ERRATA, /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
ZPOOL_STATUS_UNSUP_FEAT_READ, /* unsupported features for read */
ZPOOL_STATUS_UNSUP_FEAT_WRITE, /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */
ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
ZPOOL_STATUS_VERSION_OLDER, /* older legacy on-disk version */
ZPOOL_STATUS_FEAT_DISABLED, /* supported features are disabled */
ZPOOL_STATUS_RESILVERING, /* device being resilvered */
ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
ZPOOL_STATUS_REMOVED_DEV, /* removed device */
ZPOOL_STATUS_REBUILDING, /* device being rebuilt */
ZPOOL_STATUS_REBUILD_SCRUB, /* recommend scrubbing the pool */
ZPOOL_STATUS_NON_NATIVE_ASHIFT, /* (e.g. 512e dev with ashift of 9) */
ZPOOL_STATUS_COMPATIBILITY_ERR, /* bad 'compatibility' property */
ZPOOL_STATUS_INCOMPATIBLE_FEAT, /* feature set outside compatibility */
/*
* Finally, the following indicates a healthy pool.
*/
ZPOOL_STATUS_OK
} zpool_status_t;
_LIBZFS_H zpool_status_t zpool_get_status(zpool_handle_t *, const char **,
zpool_errata_t *);
_LIBZFS_H zpool_status_t zpool_import_status(nvlist_t *, const char **,
zpool_errata_t *);
/*
* Statistics and configuration functions.
*/
_LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
/*
* Import and export functions
*/
_LIBZFS_H int zpool_export(zpool_handle_t *, boolean_t, const char *);
_LIBZFS_H int zpool_export_force(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
char *altroot);
_LIBZFS_H int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
nvlist_t *, int);
_LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config);
/*
* Miscellaneous pool functions
*/
struct zfs_cmd;
_LIBZFS_H const char *const zfs_history_event_names[];
typedef enum {
VDEV_NAME_PATH = 1 << 0,
VDEV_NAME_GUID = 1 << 1,
VDEV_NAME_FOLLOW_LINKS = 1 << 2,
VDEV_NAME_TYPE_ID = 1 << 3,
} vdev_name_t;
_LIBZFS_H char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
int name_flags);
_LIBZFS_H int zpool_upgrade(zpool_handle_t *, uint64_t);
_LIBZFS_H int zpool_get_history(zpool_handle_t *, nvlist_t **, uint64_t *,
boolean_t *);
_LIBZFS_H int zpool_events_next(libzfs_handle_t *, nvlist_t **, int *, unsigned,
int);
_LIBZFS_H int zpool_events_clear(libzfs_handle_t *, int *);
_LIBZFS_H int zpool_events_seek(libzfs_handle_t *, uint64_t, int);
_LIBZFS_H void zpool_obj_to_path_ds(zpool_handle_t *, uint64_t, uint64_t,
char *, size_t);
_LIBZFS_H void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t);
_LIBZFS_H int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
_LIBZFS_H void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
_LIBZFS_H int zpool_checkpoint(zpool_handle_t *);
_LIBZFS_H int zpool_discard_checkpoint(zpool_handle_t *);
_LIBZFS_H boolean_t zpool_is_draid_spare(const char *);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
*/
_LIBZFS_H zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
_LIBZFS_H zfs_handle_t *zfs_handle_dup(zfs_handle_t *);
_LIBZFS_H void zfs_close(zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_type(const zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_underlying_type(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_name(const zfs_handle_t *);
_LIBZFS_H zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_pool_name(const zfs_handle_t *);
/*
* Property management functions. Some functions are shared with the kernel,
* and are found in sys/fs/zfs.h.
*/
/*
* zfs dataset property management
*/
_LIBZFS_H const char *zfs_prop_default_string(zfs_prop_t);
_LIBZFS_H uint64_t zfs_prop_default_numeric(zfs_prop_t);
_LIBZFS_H const char *zfs_prop_column_name(zfs_prop_t);
_LIBZFS_H boolean_t zfs_prop_align_right(zfs_prop_t);
_LIBZFS_H nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t,
nvlist_t *, uint64_t, zfs_handle_t *, zpool_handle_t *, boolean_t,
const char *);
_LIBZFS_H const char *zfs_prop_to_name(zfs_prop_t);
_LIBZFS_H int zfs_prop_set(zfs_handle_t *, const char *, const char *);
_LIBZFS_H int zfs_prop_set_list(zfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
zprop_source_t *, char *, size_t, boolean_t);
_LIBZFS_H int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t,
boolean_t);
_LIBZFS_H int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
zprop_source_t *, char *, size_t);
_LIBZFS_H int zfs_prop_get_userquota_int(zfs_handle_t *zhp,
const char *propname, uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname,
char *buf, size_t len);
_LIBZFS_H uint64_t getprop_uint64(zfs_handle_t *, zfs_prop_t, const char **);
_LIBZFS_H uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
_LIBZFS_H int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t);
_LIBZFS_H const char *zfs_prop_values(zfs_prop_t);
_LIBZFS_H int zfs_prop_is_string(zfs_prop_t prop);
_LIBZFS_H nvlist_t *zfs_get_all_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_user_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_recvd_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_clones_nvl(zfs_handle_t *);
_LIBZFS_H int zfs_wait_status(zfs_handle_t *, zfs_wait_activity_t,
boolean_t *, boolean_t *);
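/*
 * Illustrative sketch (not part of the original header): reading the
 * mountpoint property of a hypothetical dataset "tank/home", with
 * ZFS_MAXPROPLEN coming from sys/fs/zfs.h.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL) {
 *		if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, sizeof (buf),
 *		    NULL, NULL, 0, B_FALSE) == 0)
 *			(void) printf("mountpoint: %s\n", buf);
 *		zfs_close(zhp);
 *	}
 */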
/*
* zfs encryption management
*/
_LIBZFS_H int zfs_crypto_get_encryption_root(zfs_handle_t *, boolean_t *,
char *);
_LIBZFS_H int zfs_crypto_create(libzfs_handle_t *, char *, nvlist_t *,
nvlist_t *, boolean_t stdin_available, uint8_t **, uint_t *);
_LIBZFS_H int zfs_crypto_clone_check(libzfs_handle_t *, zfs_handle_t *, char *,
nvlist_t *);
_LIBZFS_H int zfs_crypto_attempt_load_keys(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_crypto_load_key(zfs_handle_t *, boolean_t, const char *);
_LIBZFS_H int zfs_crypto_unload_key(zfs_handle_t *);
_LIBZFS_H int zfs_crypto_rewrap(zfs_handle_t *, nvlist_t *, boolean_t);
typedef struct zprop_list {
int pl_prop;
char *pl_user_prop;
struct zprop_list *pl_next;
boolean_t pl_all;
size_t pl_width;
size_t pl_recvd_width;
boolean_t pl_fixed;
} zprop_list_t;
_LIBZFS_H int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t,
boolean_t);
_LIBZFS_H void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
_LIBZFS_H int vdev_expand_proplist(zpool_handle_t *, const char *,
zprop_list_t **);
#define ZFS_MOUNTPOINT_NONE "none"
#define ZFS_MOUNTPOINT_LEGACY "legacy"
#define ZFS_FEATURE_DISABLED "disabled"
#define ZFS_FEATURE_ENABLED "enabled"
#define ZFS_FEATURE_ACTIVE "active"
#define ZFS_UNSUPPORTED_INACTIVE "inactive"
#define ZFS_UNSUPPORTED_READONLY "readonly"
/*
* zpool property management
*/
_LIBZFS_H int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **,
zfs_type_t, boolean_t);
_LIBZFS_H int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
size_t);
_LIBZFS_H const char *zpool_prop_default_string(zpool_prop_t);
_LIBZFS_H uint64_t zpool_prop_default_numeric(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_column_name(zpool_prop_t);
_LIBZFS_H boolean_t zpool_prop_align_right(zpool_prop_t);
/*
* Functions shared by zfs and zpool property management.
*/
_LIBZFS_H int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
boolean_t ordered, zfs_type_t type);
_LIBZFS_H int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
zfs_type_t);
_LIBZFS_H void zprop_free_list(zprop_list_t *);
#define ZFS_GET_NCOLS 5
typedef enum {
GET_COL_NONE,
GET_COL_NAME,
GET_COL_PROPERTY,
GET_COL_VALUE,
GET_COL_RECVD,
GET_COL_SOURCE
} zfs_get_column_t;
/*
* Functions for printing zfs or zpool properties
*/
typedef struct vdev_cbdata {
int cb_name_flags;
char **cb_names;
unsigned int cb_names_count;
} vdev_cbdata_t;
typedef struct zprop_get_cbdata {
int cb_sources;
zfs_get_column_t cb_columns[ZFS_GET_NCOLS];
int cb_colwidths[ZFS_GET_NCOLS + 1];
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
zprop_list_t *cb_proplist;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
} zprop_get_cbdata_t;
_LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *);
/*
* Iterator functions.
*/
#define ZFS_ITER_RECURSE (1 << 0)
#define ZFS_ITER_ARGS_CAN_BE_PATHS (1 << 1)
#define ZFS_ITER_PROP_LISTSNAPS (1 << 2)
#define ZFS_ITER_DEPTH_LIMIT (1 << 3)
#define ZFS_ITER_RECVD_PROPS (1 << 4)
#define ZFS_ITER_LITERAL_PROPS (1 << 5)
#define ZFS_ITER_SIMPLE (1 << 6)
typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents_v2(zfs_handle_t *, int, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots_v2(zfs_handle_t *, int, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted_v2(zfs_handle_t *, int, zfs_iter_f,
void *, uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec_v2(zfs_handle_t *, int, const char *,
zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_bookmarks_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *);
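/*
 * Illustrative sketch (not part of the original header): recursively
 * visiting every filesystem via the v2 iterators, passing flags of 0.
 * In this sketch the callback closes each handle it receives.
 *
 *	static int
 *	visit_cb(zfs_handle_t *zhp, void *data)
 *	{
 *		(void) printf("%s\n", zfs_get_name(zhp));
 *		int err = zfs_iter_filesystems_v2(zhp, 0, visit_cb, data);
 *		zfs_close(zhp);
 *		return (err);
 *	}
 *
 *	(void) zfs_iter_root(hdl, visit_cb, NULL);
 */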
typedef struct get_all_cb {
zfs_handle_t **cb_handles;
size_t cb_alloc;
size_t cb_used;
} get_all_cb_t;
_LIBZFS_H void zfs_foreach_mountpoint(libzfs_handle_t *, zfs_handle_t **,
size_t, zfs_iter_f, void *, boolean_t);
_LIBZFS_H void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *);
/*
* Functions to create and destroy datasets.
*/
_LIBZFS_H int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
nvlist_t *);
_LIBZFS_H int zfs_create_ancestors(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_destroy(zfs_handle_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl(libzfs_handle_t *, nvlist_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl_os(libzfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t,
nvlist_t *);
_LIBZFS_H int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps,
nvlist_t *props);
_LIBZFS_H int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
typedef struct renameflags {
/* recursive rename */
unsigned int recursive : 1;
/* don't unmount file systems */
unsigned int nounmount : 1;
/* force unmount file systems */
unsigned int forceunmount : 1;
} renameflags_t;
_LIBZFS_H int zfs_rename(zfs_handle_t *, const char *, renameflags_t);
typedef struct sendflags {
/* Amount of extra information to print. */
int verbosity;
/* recursive send (ie, -R) */
boolean_t replicate;
/* for recursive send, skip sending missing snapshots */
boolean_t skipmissing;
/* for incrementals, do all intermediate snapshots */
boolean_t doall;
/* if dataset is a clone, do incremental from its origin */
boolean_t fromorigin;
/* field no longer used, maintained for backwards compatibility */
boolean_t pad;
/* send properties (ie, -p) */
boolean_t props;
/* do not send (no-op, ie. -n) */
boolean_t dryrun;
/* parsable verbose output (ie. -P) */
boolean_t parsable;
/* show progress (ie. -v) */
boolean_t progress;
/* show progress as process title (ie. -V) */
boolean_t progressastitle;
/* large blocks (>128K) are permitted */
boolean_t largeblock;
/* WRITE_EMBEDDED records of type DATA are permitted */
boolean_t embed_data;
/* compressed WRITE records are permitted */
boolean_t compress;
/* raw encrypted records are permitted */
boolean_t raw;
/* only send received properties (ie. -b) */
boolean_t backup;
/* include snapshot holds in send stream */
boolean_t holds;
/* stream represents a partially received dataset */
boolean_t saved;
} sendflags_t;
typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_send(zfs_handle_t *, const char *, const char *,
sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **);
_LIBZFS_H int zfs_send_one(zfs_handle_t *, const char *, int, sendflags_t *,
const char *);
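/*
 * Illustrative sketch (not part of the original header): a verbose
 * dry-run full send of a hypothetical snapshot to stdout.
 *
 *	sendflags_t flags = { 0 };
 *	flags.dryrun = B_TRUE;
 *	flags.verbosity = 1;
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/fs@snap", ZFS_TYPE_SNAPSHOT);
 *	if (zhp != NULL) {
 *		(void) zfs_send_one(zhp, NULL, STDOUT_FILENO, &flags, NULL);
 *		zfs_close(zhp);
 *	}
 */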
_LIBZFS_H int zfs_send_progress(zfs_handle_t *, int, uint64_t *, uint64_t *);
_LIBZFS_H int zfs_send_resume(libzfs_handle_t *, sendflags_t *, int outfd,
const char *);
_LIBZFS_H int zfs_send_saved(zfs_handle_t *, sendflags_t *, int, const char *);
_LIBZFS_H nvlist_t *zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl,
const char *token);
_LIBZFS_H int zfs_promote(zfs_handle_t *);
_LIBZFS_H int zfs_hold(zfs_handle_t *, const char *, const char *,
boolean_t, int);
_LIBZFS_H int zfs_hold_nvl(zfs_handle_t *, int, nvlist_t *);
_LIBZFS_H int zfs_release(zfs_handle_t *, const char *, const char *,
boolean_t);
_LIBZFS_H int zfs_get_holds(zfs_handle_t *, nvlist_t **);
_LIBZFS_H uint64_t zvol_volsize_to_reservation(zpool_handle_t *, uint64_t,
nvlist_t *);
typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain,
uid_t rid, uint64_t space);
_LIBZFS_H int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t,
zfs_userspace_cb_t, void *);
_LIBZFS_H int zfs_get_fsacl(zfs_handle_t *, nvlist_t **);
_LIBZFS_H int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *);
typedef struct recvflags {
/* print informational messages (ie, -v was specified) */
boolean_t verbose;
/* the destination is a prefix, not the exact fs (ie, -d) */
boolean_t isprefix;
/*
* Only the tail of the sent snapshot path is appended to the
* destination to determine the received snapshot name (ie, -e).
*/
boolean_t istail;
/* do not actually do the recv, just check if it would work (ie, -n) */
boolean_t dryrun;
/* rollback/destroy filesystems as necessary (eg, -F) */
boolean_t force;
/* set "canmount=off" on all modified filesystems */
boolean_t canmountoff;
/*
* Mark the file systems as "resumable" and do not destroy them if the
* receive is interrupted
*/
boolean_t resumable;
/* byteswap flag is used internally; callers need not specify */
boolean_t byteswap;
/* do not mount file systems as they are extracted (private) */
boolean_t nomount;
/* Was holds flag set in the compound header? */
boolean_t holds;
/* skip receive of snapshot holds */
boolean_t skipholds;
/* mount the filesystem unless nomount is specified */
boolean_t domount;
/* force unmount while recv snapshot (private) */
boolean_t forceunmount;
/* use this recv to check (and heal if needed) an existing snapshot */
boolean_t heal;
} recvflags_t;
_LIBZFS_H int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
recvflags_t *, int, avl_tree_t *);
typedef enum diff_flags {
ZFS_DIFF_PARSEABLE = 1 << 0,
ZFS_DIFF_TIMESTAMP = 1 << 1,
ZFS_DIFF_CLASSIFY = 1 << 2,
ZFS_DIFF_NO_MANGLE = 1 << 3
} diff_flags_t;
_LIBZFS_H int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *,
int);
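/*
 * Illustrative sketch (not part of the original header): writing a
 * parseable, timestamped diff between two hypothetical snapshots to
 * stdout.
 *
 *	(void) zfs_show_diffs(zhp, STDOUT_FILENO, "tank/fs@snap1",
 *	    "tank/fs@snap2", ZFS_DIFF_PARSEABLE | ZFS_DIFF_TIMESTAMP);
 */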
/*
* Miscellaneous functions.
*/
_LIBZFS_H const char *zfs_type_to_name(zfs_type_t);
_LIBZFS_H void zfs_refresh_properties(zfs_handle_t *);
_LIBZFS_H int zfs_name_valid(const char *, zfs_type_t);
_LIBZFS_H zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_parent_name(zfs_handle_t *, char *, size_t);
_LIBZFS_H boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_spa_version(zfs_handle_t *, int *);
_LIBZFS_H boolean_t zfs_bookmark_exists(const char *path);
/*
* Mount support functions.
*/
_LIBZFS_H boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
_LIBZFS_H boolean_t zfs_is_mounted(zfs_handle_t *, char **);
_LIBZFS_H int zfs_mount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_mount_at(zfs_handle_t *, const char *, int, const char *);
_LIBZFS_H int zfs_unmount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_unmountall(zfs_handle_t *, int);
_LIBZFS_H int zfs_mount_delegation_check(void);
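/*
 * Illustrative sketch (not part of the original header): mounting a
 * dataset at its configured mountpoint when it is not already mounted.
 *
 *	char *where = NULL;
 *	if (zfs_is_mounted(zhp, &where))
 *		free(where);		// mountpoint string is allocated
 *	else
 *		(void) zfs_mount(zhp, NULL, 0);	// default options, no flags
 */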
#if defined(__linux__) || defined(__APPLE__)
_LIBZFS_H int zfs_parse_mount_options(const char *mntopts,
unsigned long *mntflags, unsigned long *zfsflags, int sloppy, char *badopt,
char *mtabopt);
_LIBZFS_H void zfs_adjust_mount_options(zfs_handle_t *zhp, const char *mntpoint,
char *mntopts, char *mtabopt);
#endif
/*
* Share support functions.
*
* enum sa_protocol * lists are terminated with SA_NO_PROTOCOL;
* a NULL list means "all/any known to this libzfs".
*/
#define SA_NO_PROTOCOL -1
_LIBZFS_H boolean_t zfs_is_shared(zfs_handle_t *zhp, char **where,
const enum sa_protocol *proto);
_LIBZFS_H int zfs_share(zfs_handle_t *zhp, const enum sa_protocol *proto);
_LIBZFS_H int zfs_unshare(zfs_handle_t *zhp, const char *mountpoint,
const enum sa_protocol *proto);
_LIBZFS_H int zfs_unshareall(zfs_handle_t *zhp,
const enum sa_protocol *proto);
_LIBZFS_H void zfs_commit_shares(const enum sa_protocol *proto);
_LIBZFS_H void zfs_truncate_shares(const enum sa_protocol *proto);
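/*
 * Illustrative sketch (not part of the original header): sharing a
 * dataset over NFS only, using an SA_NO_PROTOCOL-terminated list.
 * SA_PROTOCOL_NFS comes from libshare.
 *
 *	const enum sa_protocol nfs_only[] =
 *	    { SA_PROTOCOL_NFS, SA_NO_PROTOCOL };
 *	if (zfs_share(zhp, nfs_only) == 0)
 *		zfs_commit_shares(nfs_only);
 */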
_LIBZFS_H int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
/*
* Utility functions to run an external process.
*/
#define STDOUT_VERBOSE 0x01
#define STDERR_VERBOSE 0x02
#define NO_DEFAULT_PATH 0x04 /* Don't use $PATH to lookup the command */
_LIBZFS_H int libzfs_run_process(const char *, char **, int);
_LIBZFS_H int libzfs_run_process_get_stdout(const char *, char *[], char *[],
char **[], int *);
_LIBZFS_H int libzfs_run_process_get_stdout_nopath(const char *, char *[],
char *[], char **[], int *);
_LIBZFS_H void libzfs_free_str_array(char **, int);
_LIBZFS_H boolean_t libzfs_envvar_is_set(const char *);
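/*
 * Illustrative sketch (not part of the original header): running an
 * external helper and capturing its stdout lines, which are released
 * with libzfs_free_str_array().  A NULL environment is assumed here to
 * fall back to the parent's environment.
 *
 *	char *argv[] = { "zpool", "--version", NULL };
 *	char **lines = NULL;
 *	int nlines = 0;
 *	if (libzfs_run_process_get_stdout("zpool", argv, NULL,
 *	    &lines, &nlines) == 0)
 *		libzfs_free_str_array(lines, nlines);
 */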
/*
* Utility functions for zfs version
*/
_LIBZFS_H const char *zfs_version_userland(void);
_LIBZFS_H char *zfs_version_kernel(void);
_LIBZFS_H int zfs_version_print(void);
/*
* Given a device or file, determine if it is part of a pool.
*/
_LIBZFS_H int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
boolean_t *);
/*
* Label manipulation.
*/
_LIBZFS_H int zpool_clear_label(int);
_LIBZFS_H int zpool_set_bootenv(zpool_handle_t *, const nvlist_t *);
_LIBZFS_H int zpool_get_bootenv(zpool_handle_t *, nvlist_t **);
/*
* Management interfaces for SMB ACL files
*/
_LIBZFS_H int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *);
_LIBZFS_H int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *,
char *);
/*
* Enable and disable datasets within a pool by mounting/unmounting and
* sharing/unsharing them.
*/
_LIBZFS_H int zpool_enable_datasets(zpool_handle_t *, const char *, int);
_LIBZFS_H int zpool_disable_datasets(zpool_handle_t *, boolean_t);
_LIBZFS_H void zpool_disable_datasets_os(zpool_handle_t *, boolean_t);
_LIBZFS_H void zpool_disable_volume_os(const char *);
/*
* Parse a features file for -o compatibility
*/
typedef enum {
ZPOOL_COMPATIBILITY_OK,
ZPOOL_COMPATIBILITY_WARNTOKEN,
ZPOOL_COMPATIBILITY_BADTOKEN,
ZPOOL_COMPATIBILITY_BADFILE,
ZPOOL_COMPATIBILITY_NOFILES
} zpool_compat_status_t;
_LIBZFS_H zpool_compat_status_t zpool_load_compat(const char *,
boolean_t *, char *, size_t);
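/*
 * Illustrative sketch (not part of the original header): validating a
 * hypothetical compatibility token and reporting a failure.  The
 * boolean_t array is assumed to be indexed by SPA feature, with
 * SPA_FEATURES entries (zfeature_common.h).
 *
 *	boolean_t features[SPA_FEATURES];
 *	char report[1024];
 *	if (zpool_load_compat("openzfs-2.1-linux", features, report,
 *	    sizeof (report)) != ZPOOL_COMPATIBILITY_OK)
 *		(void) fprintf(stderr, "%s\n", report);
 */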
#ifdef __FreeBSD__
/*
* Attach/detach the given filesystem to/from the given jail.
*/
_LIBZFS_H int zfs_jail(zfs_handle_t *zhp, int jailid, int attach);
/*
* Set loader options for next boot.
*/
_LIBZFS_H int zpool_nextboot(libzfs_handle_t *, uint64_t, uint64_t,
const char *);
#endif /* __FreeBSD__ */
#ifdef __linux__
/*
* Add or delete the given filesystem to/from the given user namespace.
*/
_LIBZFS_H int zfs_userns(zfs_handle_t *zhp, const char *nspath, int attach);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h
index 91dadfae6613..0779e58e4953 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h
@@ -1,213 +1,217 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_VNODE_H_
#define _OPENSOLARIS_SYS_VNODE_H_
struct vnode;
struct vattr;
struct xucred;
typedef struct flock flock64_t;
typedef struct vnode vnode_t;
typedef struct vattr vattr_t;
-#define vtype_t __enum_uint8(vtype)
+#if __FreeBSD_version < 1400093
+typedef enum vtype vtype_t;
+#else
+#define vtype_t __enum_uint8(vtype)
+#endif
#include <sys/types.h>
#include <sys/queue.h>
#include_next <sys/sdt.h>
#include <sys/namei.h>
enum symfollow { NO_FOLLOW = NOFOLLOW };
#define NOCRED ((struct ucred *)0) /* no credential available */
#define F_FREESP 11 /* Free file space */
#include <sys/proc.h>
#include <sys/vnode_impl.h>
#ifndef IN_BASE
#include_next <sys/vnode.h>
#endif
#include <sys/mount.h>
#include <sys/cred.h>
#include <sys/fcntl.h>
#include <sys/refcount.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/syscallsubr.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
typedef struct vop_vector vnodeops_t;
#define VOP_FID VOP_VPTOFH
#define vop_fid vop_vptofh
#define vop_fid_args vop_vptofh_args
#define a_fid a_fhp
#define rootvfs (rootvnode == NULL ? NULL : rootvnode->v_mount)
#ifndef IN_BASE
static __inline int
vn_is_readonly(vnode_t *vp)
{
return (vp->v_mount->mnt_flag & MNT_RDONLY);
}
#endif
#define vn_vfswlock(vp) (0)
#define vn_vfsunlock(vp) do { } while (0)
#define vn_ismntpt(vp) \
((vp)->v_type == VDIR && (vp)->v_mountedhere != NULL)
#define vn_mountedvfs(vp) ((vp)->v_mountedhere)
#define vn_has_cached_data(vp) \
((vp)->v_object != NULL && \
(vp)->v_object->resident_page_count > 0)
#ifndef IN_BASE
static __inline void
vn_flush_cached_data(vnode_t *vp, boolean_t sync)
{
#if __FreeBSD_version > 1300054
if (vm_object_mightbedirty(vp->v_object)) {
#else
if (vp->v_object->flags & OBJ_MIGHTBEDIRTY) {
#endif
int flags = sync ? OBJPC_SYNC : 0;
vn_lock(vp, LK_SHARED | LK_RETRY);
zfs_vmobject_wlock(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, flags);
zfs_vmobject_wunlock(vp->v_object);
VOP_UNLOCK(vp);
}
}
#endif
#define vn_exists(vp) do { } while (0)
#define vn_invalid(vp) do { } while (0)
#define vn_free(vp) do { } while (0)
#define vn_matchops(vp, vops) ((vp)->v_op == &(vops))
#define VN_HOLD(v) vref(v)
#define VN_RELE(v) vrele(v)
#define VN_URELE(v) vput(v)
#define vnevent_create(vp, ct) do { } while (0)
#define vnevent_link(vp, ct) do { } while (0)
#define vnevent_remove(vp, dvp, name, ct) do { } while (0)
#define vnevent_rmdir(vp, dvp, name, ct) do { } while (0)
#define vnevent_rename_src(vp, dvp, name, ct) do { } while (0)
#define vnevent_rename_dest(vp, dvp, name, ct) do { } while (0)
#define vnevent_rename_dest_dir(vp, ct) do { } while (0)
#define specvp(vp, rdev, type, cr) (VN_HOLD(vp), (vp))
#define MANDLOCK(vp, mode) (0)
/*
* We will use va_spare in place of Solaris' va_mask.
* This field is initialized in zfs_setattr().
*/
#define va_mask va_spare
/* TODO: va_fileid is shorter than va_nodeid !!! */
#define va_nodeid va_fileid
/* TODO: This field needs conversion! */
#define va_nblocks va_bytes
#define va_blksize va_blocksize
#define MAXOFFSET_T OFF_MAX
#define FIGNORECASE 0x00
/*
* Attributes of interest to the caller of setattr or getattr.
*/
#undef AT_UID
#undef AT_GID
#define AT_MODE 0x00002
#define AT_UID 0x00004
#define AT_GID 0x00008
#define AT_FSID 0x00010
#define AT_NODEID 0x00020
#define AT_NLINK 0x00040
#define AT_SIZE 0x00080
#define AT_ATIME 0x00100
#define AT_MTIME 0x00200
#define AT_CTIME 0x00400
#define AT_RDEV 0x00800
#define AT_BLKSIZE 0x01000
#define AT_NBLOCKS 0x02000
/* 0x04000 */ /* unused */
#define AT_SEQ 0x08000
/*
* If AT_XVATTR is set then there are additional bits to process in
* the xvattr_t's attribute bitmap. If this is not set then the bitmap
* MUST be ignored. Note that this bit must be set/cleared explicitly.
* That is, setting AT_ALL will NOT set AT_XVATTR.
*/
#define AT_XVATTR 0x10000
#define AT_ALL (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_SEQ)
#define AT_STAT (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
#define AT_TIMES (AT_ATIME|AT_MTIME|AT_CTIME)
#define AT_NOSET (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|\
AT_BLKSIZE|AT_NBLOCKS|AT_SEQ)
#ifndef IN_BASE
static __inline void
vattr_init_mask(vattr_t *vap)
{
vap->va_mask = 0;
if (vap->va_uid != (uid_t)VNOVAL)
vap->va_mask |= AT_UID;
if (vap->va_gid != (gid_t)VNOVAL)
vap->va_mask |= AT_GID;
if (vap->va_size != (u_quad_t)VNOVAL)
vap->va_mask |= AT_SIZE;
if (vap->va_atime.tv_sec != VNOVAL)
vap->va_mask |= AT_ATIME;
if (vap->va_mtime.tv_sec != VNOVAL)
vap->va_mask |= AT_MTIME;
if (vap->va_mode != (uint16_t)VNOVAL)
vap->va_mask |= AT_MODE;
if (vap->va_flags != VNOVAL)
vap->va_mask |= AT_XVATTR;
}
#endif
#define RLIM64_INFINITY 0
#include <sys/vfs.h>
#endif /* _OPENSOLARIS_SYS_VNODE_H_ */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
index c5c6385be6ff..e0f20ba32008 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
@@ -1,764 +1,774 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
*/
#ifndef _ZFS_BLKDEV_H
#define _ZFS_BLKDEV_H
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/msdos_fs.h> /* for SECTOR_* */
#include <linux/bio.h>
#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif
#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
queue_flag_set(flag, q);
}
#endif
#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
queue_flag_clear(flag, q);
}
#endif
/*
* 4.7 API,
* The blk_queue_write_cache() interface replaced the blk_queue_flush()
* interface. However, the new interface is GPL-only, so we implement
* our own trivial wrapper when the GPL-only version is detected.
*
* 2.6.36 - 4.6 API,
* The blk_queue_flush() interface replaced the blk_queue_ordered()
* interface. However, while the old interface was available to all
* modules, the new one is GPL-only. Thus if the GPL-only version is
* detected we implement our own trivial helper.
*/
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
if (wc)
blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
blk_queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua)
blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
if (wc)
q->flush_flags |= REQ_FLUSH;
if (fua)
q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
#error "Unsupported kernel"
#endif
}
static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
!defined(HAVE_DISK_UPDATE_READAHEAD)
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
q->backing_dev_info->ra_pages = ra_pages;
#else
q->backing_dev_info.ra_pages = ra_pages;
#endif
#endif
}
#ifdef HAVE_BIO_BVEC_ITER
#define BIO_BI_SECTOR(bio) (bio)->bi_iter.bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_iter.bi_size
#define BIO_BI_IDX(bio) (bio)->bi_iter.bi_idx
#define BIO_BI_SKIP(bio) (bio)->bi_iter.bi_bvec_done
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define BIO_BI_SECTOR(bio) (bio)->bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_size
#define BIO_BI_IDX(bio) (bio)->bi_idx
#define BIO_BI_SKIP(bio) (0)
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags, bool dev,
bool transport, bool driver)
{
#ifdef CONFIG_BUG
/*
* Disable FAILFAST for loopback devices because of the
* following incorrect BUG_ON() in loop_make_request().
* This support is also disabled for md devices because the
* test suite layers md devices on top of loopback devices.
* This may be removed when the loopback driver is fixed.
*
* BUG_ON(!lo || (rw != READ && rw != WRITE));
*/
if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
(MAJOR(bdev->bd_dev) == MD_MAJOR))
return;
#ifdef BLOCK_EXT_MAJOR
if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */
if (dev)
*flags |= REQ_FAILFAST_DEV;
if (transport)
*flags |= REQ_FAILFAST_TRANSPORT;
if (driver)
*flags |= REQ_FAILFAST_DRIVER;
}
/*
* Maximum disk label length; it may be undefined for some kernels.
*/
#if !defined(DISK_NAME_LEN)
#define DISK_NAME_LEN 32
#endif /* DISK_NAME_LEN */
#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
switch (status) {
case BLK_STS_OK:
return (0);
case BLK_STS_NOTSUPP:
return (EOPNOTSUPP);
case BLK_STS_TIMEOUT:
return (ETIMEDOUT);
case BLK_STS_NOSPC:
return (ENOSPC);
case BLK_STS_TRANSPORT:
return (ENOLINK);
case BLK_STS_TARGET:
return (EREMOTEIO);
+#ifdef HAVE_BLK_STS_RESV_CONFLICT
+ case BLK_STS_RESV_CONFLICT:
+#else
case BLK_STS_NEXUS:
+#endif
return (EBADE);
case BLK_STS_MEDIUM:
return (ENODATA);
case BLK_STS_PROTECTION:
return (EILSEQ);
case BLK_STS_RESOURCE:
return (ENOMEM);
case BLK_STS_AGAIN:
return (EAGAIN);
case BLK_STS_IOERR:
return (EIO);
default:
return (EIO);
}
}
static inline blk_status_t
errno_to_bi_status(int error)
{
switch (error) {
case 0:
return (BLK_STS_OK);
case EOPNOTSUPP:
return (BLK_STS_NOTSUPP);
case ETIMEDOUT:
return (BLK_STS_TIMEOUT);
case ENOSPC:
return (BLK_STS_NOSPC);
case ENOLINK:
return (BLK_STS_TRANSPORT);
case EREMOTEIO:
return (BLK_STS_TARGET);
case EBADE:
+#ifdef HAVE_BLK_STS_RESV_CONFLICT
+ return (BLK_STS_RESV_CONFLICT);
+#else
return (BLK_STS_NEXUS);
+#endif
case ENODATA:
return (BLK_STS_MEDIUM);
case EILSEQ:
return (BLK_STS_PROTECTION);
case ENOMEM:
return (BLK_STS_RESOURCE);
case EAGAIN:
return (BLK_STS_AGAIN);
case EIO:
return (BLK_STS_IOERR);
default:
return (BLK_STS_IOERR);
}
}
#endif /* HAVE_BIO_BI_STATUS */
/*
* 4.3 API change
* The bio_endio() prototype changed slightly. These are helper
* macros to ensure the prototype and invocation are handled.
*/
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define BIO_END_IO_ERROR(bio) bi_status_to_errno(bio->bi_status)
#define BIO_END_IO_PROTO(fn, x, z) static void fn(struct bio *x)
#define BIO_END_IO(bio, error) bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
ASSERT3S(error, <=, 0);
bio->bi_status = errno_to_bi_status(-error);
bio_endio(bio);
}
#else
#define BIO_END_IO_ERROR(bio) (-(bio->bi_error))
#define BIO_END_IO_PROTO(fn, x, z) static void fn(struct bio *x)
#define BIO_END_IO(bio, error) bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
ASSERT3S(error, <=, 0);
bio->bi_error = error;
bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */
#else
#define BIO_END_IO_PROTO(fn, x, z) static void fn(struct bio *x, int z)
#define BIO_END_IO(bio, error) bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */
/*
* 5.15 MACRO,
* GD_DEAD
*
* 2.6.36 - 5.14 MACRO,
* GENHD_FL_UP
*
* Check the disk status and return B_TRUE if alive,
* otherwise B_FALSE.
*/
static inline boolean_t
zfs_check_disk_status(struct block_device *bdev)
{
#if defined(GENHD_FL_UP)
return (!!(bdev->bd_disk->flags & GENHD_FL_UP));
#elif defined(GD_DEAD)
return (!test_bit(GD_DEAD, &bdev->bd_disk->state));
#else
/*
* This is encountered if neither GENHD_FL_UP nor GD_DEAD is available in
* the kernel - likely due to a macro change that needs to be chased down.
*/
#error "Unsupported kernel: no usable disk status check"
#endif
}
/*
* 4.1 API,
* 3.10.0 CentOS 7.x API,
* blkdev_reread_part()
*
* For older kernels trigger a re-reading of the partition table by calling
* check_disk_change() which calls flush_disk() to invalidate the device.
*
* For newer kernels (as of 5.10), bdev_check_media_change() is used in
* favor of check_disk_change(), with the modification that invalidation
* is no longer forced.
*/
#ifdef HAVE_CHECK_DISK_CHANGE
#define zfs_check_media_change(bdev) check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define vdev_bdev_reread_part(bdev) blkdev_reread_part(bdev)
#else
#define vdev_bdev_reread_part(bdev) check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
struct gendisk *gd = bdev->bd_disk;
const struct block_device_operations *bdo = gd->fops;
#endif
if (!bdev_check_media_change(bdev))
return (0);
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
/*
* Force revalidation, to mimic the old behavior of
* check_disk_change()
*/
if (bdo->revalidate_disk)
bdo->revalidate_disk(gd);
#endif
return (0);
}
#define vdev_bdev_reread_part(bdev) zfs_check_media_change(bdev)
+#elif defined(HAVE_DISK_CHECK_MEDIA_CHANGE)
+#define vdev_bdev_reread_part(bdev) disk_check_media_change(bdev->bd_disk)
#else
/*
* This is encountered if check_disk_change() and bdev_check_media_change()
* are not available in the kernel - likely due to an API change that needs
* to be chased down.
*/
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */
/*
* 2.6.27 API change
* The function was exported for use, prior to this it existed but the
* symbol was not exported.
*
* 4.4.0-6.21 API change for Ubuntu
* lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
*
* 5.11 API change
* Changed to take a dev_t argument which is set on success and return a
* non-zero error code on failure.
*/
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
struct block_device *bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return (PTR_ERR(bdev));
*dev = bdev->bd_dev;
bdput(bdev);
return (0);
#elif defined(HAVE_MODE_LOOKUP_BDEV)
struct block_device *bdev = lookup_bdev(path, FMODE_READ);
if (IS_ERR(bdev))
return (PTR_ERR(bdev));
*dev = bdev->bd_dev;
bdput(bdev);
return (0);
#else
#error "Unsupported kernel"
#endif
}
/*
* Kernels without bio_set_op_attrs use bi_rw for the bio flags.
*/
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
#if defined(HAVE_BIO_BI_OPF)
bio->bi_opf = rw | flags;
#else
bio->bi_rw |= rw | flags;
#endif /* HAVE_BIO_BI_OPF */
}
#endif
/*
* bio_set_flush - Set the appropriate flags in a bio to guarantee
* data are on non-volatile media on completion.
*
* 2.6.37 - 4.8 API,
* Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
* replacement for WRITE_BARRIER to allow expressing richer semantics
* to the block layer. It's up to the block layer to implement the
* semantics correctly. Use the WRITE_FLUSH_FUA flag combination.
*
* 4.8 - 4.9 API,
* REQ_FLUSH was renamed to REQ_PREFLUSH. For consistency with previous
* OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
*
* 4.10 API,
* The read/write flags and their modifiers, including WRITE_FLUSH,
* WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
* torvalds/linux@70fd7614 and replaced by direct flag modification
* of the REQ_ flags in bio->bi_opf. Use REQ_PREFLUSH.
*/
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(HAVE_REQ_PREFLUSH) /* >= 4.10 */
bio_set_op_attrs(bio, 0, REQ_PREFLUSH | REQ_OP_WRITE);
#elif defined(WRITE_FLUSH_FUA) /* >= 2.6.37 and <= 4.9 */
bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else
#error "Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}
/*
* 4.8 API,
* REQ_OP_FLUSH
*
* 4.8-rc0 - 4.8-rc1,
* REQ_PREFLUSH
*
* 2.6.36 - 4.7 API,
* REQ_FLUSH
*
* Checking the flush flag works in all cases but may have a performance
* impact for some kernels. It has the advantage of minimizing kernel
* specific changes in the zvol code.
*
*/
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(HAVE_REQ_FLUSH)
return (bio->bi_rw & REQ_FLUSH);
#else
#error "Unsupported kernel"
#endif
}
/*
* 4.8 API,
* REQ_FUA flag moved to bio->bi_opf
*
* 2.6.x - 4.7 API,
* REQ_FUA
*/
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
return (bio->bi_rw & REQ_FUA);
#else
#error "Allowing the build will cause fua requests to be ignored."
#endif
}
/*
* 4.8 API,
* REQ_OP_DISCARD
*
* 2.6.36 - 4.7 API,
* REQ_DISCARD
*
* In all cases the normal I/O path is used for discards. The only
* difference is how the kernel tags individual I/Os as discards.
*/
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_REQ_DISCARD)
return (bio->bi_rw & REQ_DISCARD);
#else
#error "Unsupported kernel"
#endif
}
/*
* 4.8 API,
* REQ_OP_SECURE_ERASE
*
* 2.6.36 - 4.7 API,
* REQ_SECURE
*/
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
return (bio->bi_rw & REQ_SECURE);
#else
return (0);
#endif
}
/*
* 2.6.33 API change
* Discard granularity and alignment restrictions may now be set. For
* older kernels which do not support this it is safe to skip it.
*/
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
q->limits.discard_granularity = dg;
}
/*
* 5.19 API,
* bdev_max_discard_sectors()
*
* 2.6.32 API,
* blk_queue_discard()
*/
static inline boolean_t
bdev_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
return (!!bdev_max_discard_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_DISCARD)
return (!!blk_queue_discard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}
/*
* 5.19 API,
* bdev_max_secure_erase_sectors()
*
* 4.8 API,
* blk_queue_secure_erase()
*
* 2.6.36 - 4.7 API,
* blk_queue_secdiscard()
*/
static inline boolean_t
bdev_secure_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
return (!!bdev_max_secure_erase_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
#elif defined(HAVE_BLK_QUEUE_SECDISCARD)
return (!!blk_queue_secdiscard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}
/*
* A common holder for vdev_bdev_open() is used to relax the exclusive open
* semantics slightly. Internal vdev disk callers may pass VDEV_HOLDER to
* allow them to open the device multiple times. Other kernel callers and
* user space processes which don't pass this value will get EBUSY. This is
* currently required for the correct operation of hot spares.
*/
#define VDEV_HOLDER ((void *)0x2401de7)
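/*
 * Illustrative sketch (not part of the original header, and assuming a
 * kernel that still provides blkdev_get_by_path()): opening a vdev
 * exclusively while letting other internal vdev callers share it via
 * the common holder.
 *
 *	struct block_device *bdev = blkdev_get_by_path(path,
 *	    FMODE_READ | FMODE_WRITE | FMODE_EXCL, VDEV_HOLDER);
 */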
static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
struct gendisk *disk __attribute__((unused)),
int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
return (bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
jiffies));
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
bio_op(bio), jiffies));
#elif defined(HAVE_DISK_IO_ACCT)
return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
unsigned long start_time = jiffies;
generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
unsigned long start_time = jiffies;
generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
return (start_time);
#else
/* Unsupported */
return (0);
#endif
}
static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
struct gendisk *disk __attribute__((unused)),
int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
start_time);
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
#elif defined(HAVE_DISK_IO_ACCT)
disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}
#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
return (blk_alloc_queue_rh(make_request, node_id));
#else
struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
if (q != NULL)
blk_queue_make_request(q, make_request);
return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/*
* All the io_*() helper functions below can operate on a bio, or a rq, but
* not both. The older submit_bio() codepath will pass a bio, and the
* newer blk-mq codepath will pass a rq.
*/
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL) {
if (op_is_write(req_op(rq))) {
return (WRITE);
} else {
return (READ);
}
}
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_data_dir(bio));
}
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_FLUSH);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_flush(bio));
}
static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_DISCARD);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_discard(bio));
}
static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_SECURE_ERASE);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_secure_erase(bio));
}
static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (rq->cmd_flags & REQ_FUA);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_fua(bio));
}
static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (blk_rq_pos(rq) << 9);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (BIO_BI_SECTOR(bio) << 9);
}
static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (blk_rq_bytes(rq));
#else
ASSERT3P(rq, ==, NULL);
#endif
return (BIO_BI_SIZE(bio));
}
static inline int
io_has_data(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (bio_has_data(rq->bio));
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_has_data(bio));
}
#endif /* _ZFS_BLKDEV_H */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
index 1d77f0487a30..699b8a571824 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
@@ -1,757 +1,766 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
*/
/*
* USER API:
*
* Kernel fpu methods:
* kfpu_allowed()
* kfpu_begin()
* kfpu_end()
* kfpu_init()
* kfpu_fini()
*
* SIMD support:
*
* The following functions should be called to determine whether a CPU
* feature is supported. All functions are usable in kernel and user space.
* If a SIMD algorithm uses more than one instruction set, all relevant
* feature test functions should be called.
*
* Supported features:
* zfs_sse_available()
* zfs_sse2_available()
* zfs_sse3_available()
* zfs_ssse3_available()
* zfs_sse4_1_available()
* zfs_sse4_2_available()
*
* zfs_avx_available()
* zfs_avx2_available()
*
* zfs_bmi1_available()
* zfs_bmi2_available()
*
* zfs_shani_available()
*
* zfs_avx512f_available()
* zfs_avx512cd_available()
* zfs_avx512er_available()
* zfs_avx512pf_available()
* zfs_avx512bw_available()
* zfs_avx512dq_available()
* zfs_avx512vl_available()
* zfs_avx512ifma_available()
* zfs_avx512vbmi_available()
*
* NOTE(AVX-512VL): If using AVX-512 instructions with 128-bit registers,
* also add zfs_avx512vl_available() to the feature check.
*/
#ifndef _LINUX_SIMD_X86_H
#define _LINUX_SIMD_X86_H
/* only for __x86 */
#if defined(__x86)
#include <sys/types.h>
#include <asm/cpufeature.h>
/*
* Disable the WARN_ON_FPU() macro to prevent additional dependencies
* when providing the kfpu_* functions. Relevant warnings are included
* as appropriate and are unconditionally enabled.
*/
#if defined(CONFIG_X86_DEBUG_FPU) && !defined(KERNEL_EXPORTS_X86_FPU)
#undef CONFIG_X86_DEBUG_FPU
#endif
/*
* The following cases are for kernels which export either the
* kernel_fpu_* or __kernel_fpu_* functions.
*/
#if defined(KERNEL_EXPORTS_X86_FPU)
#if defined(HAVE_KERNEL_FPU_API_HEADER)
#include <asm/fpu/api.h>
#if defined(HAVE_KERNEL_FPU_INTERNAL_HEADER)
#include <asm/fpu/internal.h>
#endif
#else
#include <asm/i387.h>
#endif
#define kfpu_allowed() 1
#define kfpu_init() 0
#define kfpu_fini() ((void) 0)
#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define kfpu_begin() \
{ \
preempt_disable(); \
__kernel_fpu_begin(); \
}
#define kfpu_end() \
{ \
__kernel_fpu_end(); \
preempt_enable(); \
}
#elif defined(HAVE_KERNEL_FPU)
#define kfpu_begin() kernel_fpu_begin()
#define kfpu_end() kernel_fpu_end()
#else
/*
* This case is unreachable. When KERNEL_EXPORTS_X86_FPU is defined then
* either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
*/
#error "Unreachable kernel configuration"
#endif
#else /* defined(KERNEL_EXPORTS_X86_FPU) */
/*
* When the kernel_fpu_* symbols are unavailable then provide our own
* versions which allow the FPU to be safely used.
*/
#if defined(HAVE_KERNEL_FPU_INTERNAL)
/*
* For kernels not exporting *kfpu_{begin,end} we have to use inline assembly
* with the XSAVE{,OPT,S} instructions, so we need the toolchain to support at
* least XSAVE.
*/
#if !defined(HAVE_XSAVE)
#error "Toolchain needs to support the XSAVE assembler instruction"
#endif
+#ifndef XFEATURE_MASK_XTILE
+/*
+ * For kernels where this mask doesn't exist yet, define it ourselves so
+ * that we still avoid saving/restoring the broken AMX tile state; mask
+ * 0x60000 covers XCR0 bits 17 (XTILECFG) and 18 (XTILEDATA). See issue
+ * #14989 or Intel erratum SPR4 for why.
+ */
+#define XFEATURE_MASK_XTILE 0x60000
+#endif
+
#include <linux/mm.h>
#include <linux/slab.h>
extern uint8_t **zfs_kfpu_fpregs;
/*
* Return the size in bytes required by the XSAVE instruction for an
* XSAVE area containing all the user state components supported by this CPU.
* See: Intel 64 and IA-32 Architectures Software Developer’s Manual.
* Dec. 2021. Vol. 2A p. 3-222.
*/
static inline uint32_t
get_xsave_area_size(void)
{
if (!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
return (0);
}
/*
* Call CPUID with leaf 13 and subleaf 0. The size is in ecx.
* We don't need to check for cpuid_max here, since if this CPU has
* OSXSAVE set, it has leaf 13 (0x0D) as well.
*/
uint32_t eax, ebx, ecx, edx;
eax = 13U;
ecx = 0U;
__asm__ __volatile__("cpuid"
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a" (eax), "c" (ecx));
return (ecx);
}
/*
* Return the allocation order of the maximum buffer size required to save the
* FPU state on this architecture. The value returned is the same as Linux'
* get_order() function would return (i.e. 2^order = nr. of pages required).
* Currently this will always return 0 since the save area is below 4k even for
* a full fledged AVX-512 implementation.
*/
static inline int
get_fpuregs_save_area_order(void)
{
size_t area_size = (size_t)get_xsave_area_size();
/*
* If we are dealing with a CPU not supporting XSAVE,
* get_xsave_area_size() will return 0. Thus the maximum memory
* required is the FXSAVE area size which is 512 bytes. See: Intel 64
* and IA-32 Architectures Software Developer’s Manual. Dec. 2021.
* Vol. 2A p. 3-451.
*/
if (area_size == 0) {
area_size = 512;
}
return (get_order(area_size));
}
/*
* Free the per-cpu FPU state buffers allocated by kfpu_init().
*/
static inline void
kfpu_fini(void)
{
int cpu;
int order = get_fpuregs_save_area_order();
for_each_possible_cpu(cpu) {
if (zfs_kfpu_fpregs[cpu] != NULL) {
free_pages((unsigned long)zfs_kfpu_fpregs[cpu], order);
}
}
kfree(zfs_kfpu_fpregs);
}
static inline int
kfpu_init(void)
{
zfs_kfpu_fpregs = kzalloc(num_possible_cpus() * sizeof (uint8_t *),
GFP_KERNEL);
if (zfs_kfpu_fpregs == NULL)
return (-ENOMEM);
/*
* The fxsave and xsave operations require 16-/64-byte alignment of
* the target memory. Since kmalloc() provides no alignment guarantee,
* use alloc_pages_node() instead.
*/
int cpu;
int order = get_fpuregs_save_area_order();
for_each_possible_cpu(cpu) {
struct page *page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_ZERO, order);
if (page == NULL) {
kfpu_fini();
return (-ENOMEM);
}
zfs_kfpu_fpregs[cpu] = page_address(page);
}
return (0);
}
#define kfpu_allowed() 1
/*
* FPU save and restore instructions.
*/
#define __asm __asm__ __volatile__
#define kfpu_fxsave(addr) __asm("fxsave %0" : "=m" (*(addr)))
#define kfpu_fxsaveq(addr) __asm("fxsaveq %0" : "=m" (*(addr)))
#define kfpu_fnsave(addr) __asm("fnsave %0; fwait" : "=m" (*(addr)))
#define kfpu_fxrstor(addr) __asm("fxrstor %0" : : "m" (*(addr)))
#define kfpu_fxrstorq(addr) __asm("fxrstorq %0" : : "m" (*(addr)))
#define kfpu_frstor(addr) __asm("frstor %0" : : "m" (*(addr)))
#define kfpu_fxsr_clean(rval) __asm("fnclex; emms; fildl %P[addr]" \
: : [addr] "m" (rval));
#define kfpu_do_xsave(instruction, addr, mask) \
{ \
uint32_t low, hi; \
\
low = mask; \
hi = (uint64_t)(mask) >> 32; \
__asm(instruction " %[dst]\n\t" \
: \
: [dst] "m" (*(addr)), "a" (low), "d" (hi) \
: "memory"); \
}
static inline void
kfpu_save_fxsr(uint8_t *addr)
{
if (IS_ENABLED(CONFIG_X86_32))
kfpu_fxsave(addr);
else
kfpu_fxsaveq(addr);
}
static inline void
kfpu_save_fsave(uint8_t *addr)
{
kfpu_fnsave(addr);
}
static inline void
kfpu_begin(void)
{
/*
* Preemption and interrupts must be disabled for the critical
* region where the FPU state is being modified.
*/
preempt_disable();
local_irq_disable();
/*
* The current FPU registers need to be preserved by kfpu_begin()
* and restored by kfpu_end(). They are stored in a dedicated
* per-cpu variable, not in the task struct; this allows any user
* FPU state to be correctly preserved and restored.
*/
uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
if (static_cpu_has(X86_FEATURE_XSAVES)) {
- kfpu_do_xsave("xsaves", state, ~0);
+ kfpu_do_xsave("xsaves", state, ~XFEATURE_MASK_XTILE);
return;
}
#endif
#if defined(HAVE_XSAVEOPT)
if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
- kfpu_do_xsave("xsaveopt", state, ~0);
+ kfpu_do_xsave("xsaveopt", state, ~XFEATURE_MASK_XTILE);
return;
}
#endif
if (static_cpu_has(X86_FEATURE_XSAVE)) {
- kfpu_do_xsave("xsave", state, ~0);
+ kfpu_do_xsave("xsave", state, ~XFEATURE_MASK_XTILE);
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
kfpu_save_fxsr(state);
} else {
kfpu_save_fsave(state);
}
}
#define kfpu_do_xrstor(instruction, addr, mask) \
{ \
uint32_t low, hi; \
\
low = mask; \
hi = (uint64_t)(mask) >> 32; \
__asm(instruction " %[src]" \
: \
: [src] "m" (*(addr)), "a" (low), "d" (hi) \
: "memory"); \
}
static inline void
kfpu_restore_fxsr(uint8_t *addr)
{
/*
* On AuthenticAMD K7 and K8 processors the fxrstor instruction only
* restores the _x87 FOP, FIP, and FDP registers when an exception
* is pending. Clean the _x87 state to force the restore.
*/
if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK)))
kfpu_fxsr_clean(addr);
if (IS_ENABLED(CONFIG_X86_32)) {
kfpu_fxrstor(addr);
} else {
kfpu_fxrstorq(addr);
}
}
static inline void
kfpu_restore_fsave(uint8_t *addr)
{
kfpu_frstor(addr);
}
static inline void
kfpu_end(void)
{
uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
if (static_cpu_has(X86_FEATURE_XSAVES)) {
- kfpu_do_xrstor("xrstors", state, ~0);
+ kfpu_do_xrstor("xrstors", state, ~XFEATURE_MASK_XTILE);
goto out;
}
#endif
if (static_cpu_has(X86_FEATURE_XSAVE)) {
- kfpu_do_xrstor("xrstor", state, ~0);
+ kfpu_do_xrstor("xrstor", state, ~XFEATURE_MASK_XTILE);
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
kfpu_restore_fxsr(state);
} else {
kfpu_restore_fsave(state);
}
out:
local_irq_enable();
preempt_enable();
}
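/*
 * Usage sketch (illustrative): per the USER API comment at the top of
 * this file, SIMD code must be bracketed by kfpu_begin()/kfpu_end() so
 * that the interrupted context's FPU state is saved and restored:
 *
 *	if (kfpu_allowed()) {
 *		kfpu_begin();
 *		...run the SSE/AVX code path...
 *		kfpu_end();
 *	}
 */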
#else
#error "Exactly one of KERNEL_EXPORTS_X86_FPU or HAVE_KERNEL_FPU_INTERNAL" \
" must be defined"
#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
#endif /* defined(KERNEL_EXPORTS_X86_FPU) */
/*
* Linux kernel provides an interface for CPU feature testing.
*/
/*
* Detect register set support
*/
/*
* Check if OS supports AVX and AVX2 by checking XCR0
* Only call this function if CPUID indicates that AVX feature is
* supported by the CPU, otherwise it might be an illegal instruction.
*/
static inline uint64_t
zfs_xgetbv(uint32_t index)
{
uint32_t eax, edx;
/* xgetbv - instruction byte code */
__asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
: "=a" (eax), "=d" (edx)
: "c" (index));
return ((((uint64_t)edx)<<32) | (uint64_t)eax);
}
static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
boolean_t has_osxsave;
uint64_t xcr0;
#if defined(X86_FEATURE_OSXSAVE)
has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
has_osxsave = B_FALSE;
#endif
if (!has_osxsave)
return (B_FALSE);
xcr0 = zfs_xgetbv(0);
return ((xcr0 & state) == state);
}
#define _XSTATE_SSE_AVX (0x2 | 0x4)
#define _XSTATE_AVX512 (0xE0 | _XSTATE_SSE_AVX)
#define __ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define __zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)
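/*
 * Annotation (not in the original): the XCR0 bits tested above are
 * bit 1 (SSE/XMM state) and bit 2 (AVX/YMM state) for _XSTATE_SSE_AVX,
 * plus bits 5-7 (opmask, ZMM_Hi256, Hi16_ZMM; together 0xE0) for
 * _XSTATE_AVX512.
 */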
/*
* Check if SSE instruction set is available
*/
static inline boolean_t
zfs_sse_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM));
}
/*
* Check if SSE2 instruction set is available
*/
static inline boolean_t
zfs_sse2_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM2));
}
/*
* Check if SSE3 instruction set is available
*/
static inline boolean_t
zfs_sse3_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM3));
}
/*
* Check if SSSE3 instruction set is available
*/
static inline boolean_t
zfs_ssse3_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_SSSE3));
}
/*
* Check if SSE4.1 instruction set is available
*/
static inline boolean_t
zfs_sse4_1_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
}
/*
* Check if SSE4.2 instruction set is available
*/
static inline boolean_t
zfs_sse4_2_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
}
/*
* Check if AVX instruction set is available
*/
static inline boolean_t
zfs_avx_available(void)
{
return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
}
/*
* Check if AVX2 instruction set is available
*/
static inline boolean_t
zfs_avx2_available(void)
{
return (boot_cpu_has(X86_FEATURE_AVX2) && __ymm_enabled());
}
/*
* Check if BMI1 instruction set is available
*/
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(X86_FEATURE_BMI1)
return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
return (B_FALSE);
#endif
}
/*
* Check if BMI2 instruction set is available
*/
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(X86_FEATURE_BMI2)
return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
return (B_FALSE);
#endif
}
/*
* Check if AES instruction set is available
*/
static inline boolean_t
zfs_aes_available(void)
{
#if defined(X86_FEATURE_AES)
return (!!boot_cpu_has(X86_FEATURE_AES));
#else
return (B_FALSE);
#endif
}
/*
* Check if PCLMULQDQ instruction set is available
*/
static inline boolean_t
zfs_pclmulqdq_available(void)
{
#if defined(X86_FEATURE_PCLMULQDQ)
return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
#else
return (B_FALSE);
#endif
}
/*
* Check if MOVBE instruction is available
*/
static inline boolean_t
zfs_movbe_available(void)
{
#if defined(X86_FEATURE_MOVBE)
return (!!boot_cpu_has(X86_FEATURE_MOVBE));
#else
return (B_FALSE);
#endif
}
/*
* Check if SHA_NI instruction set is available
*/
static inline boolean_t
zfs_shani_available(void)
{
#if defined(X86_FEATURE_SHA_NI)
return (!!boot_cpu_has(X86_FEATURE_SHA_NI));
#else
return (B_FALSE);
#endif
}
/*
* AVX-512 family of instruction sets:
*
* AVX512F Foundation
* AVX512CD Conflict Detection Instructions
* AVX512ER Exponential and Reciprocal Instructions
* AVX512PF Prefetch Instructions
*
* AVX512BW Byte and Word Instructions
* AVX512DQ Double-word and Quadword Instructions
* AVX512VL Vector Length Extensions
*
* AVX512IFMA Integer Fused Multiply Add (Not supported by kernel 4.4)
* AVX512VBMI Vector Byte Manipulation Instructions
*/
/*
* Check if AVX512F instruction set is available
*/
static inline boolean_t
zfs_avx512f_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512F)
has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512CD instruction set is available
*/
static inline boolean_t
zfs_avx512cd_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512CD)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512CD);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512ER instruction set is available
*/
static inline boolean_t
zfs_avx512er_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512ER)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512ER);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512PF instruction set is available
*/
static inline boolean_t
zfs_avx512pf_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512PF)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512PF);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512BW instruction set is available
*/
static inline boolean_t
zfs_avx512bw_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512BW)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512BW);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512DQ instruction set is available
*/
static inline boolean_t
zfs_avx512dq_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512DQ)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512DQ);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512VL instruction set is available
*/
static inline boolean_t
zfs_avx512vl_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512VL)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VL);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512IFMA instruction set is available
*/
static inline boolean_t
zfs_avx512ifma_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512IFMA)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512IFMA);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512VBMI instruction set is available
*/
static inline boolean_t
zfs_avx512vbmi_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512VBMI)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VBMI);
#endif
return (has_avx512 && __zmm_enabled());
}
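/*
 * Combined-check sketch (illustrative): per the NOTE(AVX-512VL) at the
 * top of this file, code using AVX-512 instructions on 128-bit or
 * 256-bit registers should test both features:
 *
 *	if (zfs_avx512f_available() && zfs_avx512vl_available())
 *		...use the AVX-512VL code path...
 */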
#endif /* defined(__x86) */
#endif /* _LINUX_SIMD_X86_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h b/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h
index cc9cafa84f99..20eeadc46e10 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h
@@ -1,212 +1,220 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_KMEM_CACHE_H
#define _SPL_KMEM_CACHE_H
#include <sys/taskq.h>
/*
* Slab allocation interfaces. The SPL slab differs from the standard
* Linux SLAB or SLUB primarily in that each cache may be backed by slabs
* allocated from the physical or virtual memory address space. The virtual
* slabs allow for good behavior when allocating large objects of identical
* size. This slab implementation also supports both constructors and
* destructors which the Linux slab does not.
*/
typedef enum kmc_bit {
KMC_BIT_NODEBUG = 1, /* Default behavior */
KMC_BIT_KVMEM = 7, /* Use kvmalloc linux allocator */
KMC_BIT_SLAB = 8, /* Use Linux slab cache */
KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */
KMC_BIT_GROWING = 15, /* Growing in progress */
KMC_BIT_REAPING = 16, /* Reaping in progress */
KMC_BIT_DESTROY = 17, /* Destroy in progress */
KMC_BIT_TOTAL = 18, /* Proc handler helper bit */
KMC_BIT_ALLOC = 19, /* Proc handler helper bit */
KMC_BIT_MAX = 20, /* Proc handler helper bit */
} kmc_bit_t;
/* kmem move callback return values */
typedef enum kmem_cbrc {
KMEM_CBRC_YES = 0, /* Object moved */
KMEM_CBRC_NO = 1, /* Object not moved */
KMEM_CBRC_LATER = 2, /* Object not moved, try again later */
KMEM_CBRC_DONT_NEED = 3, /* Neither object is needed */
KMEM_CBRC_DONT_KNOW = 4, /* Object unknown */
} kmem_cbrc_t;
#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG)
#define KMC_KVMEM (1 << KMC_BIT_KVMEM)
#define KMC_SLAB (1 << KMC_BIT_SLAB)
#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED)
#define KMC_GROWING (1 << KMC_BIT_GROWING)
#define KMC_REAPING (1 << KMC_BIT_REAPING)
#define KMC_DESTROY (1 << KMC_BIT_DESTROY)
#define KMC_TOTAL (1 << KMC_BIT_TOTAL)
#define KMC_ALLOC (1 << KMC_BIT_ALLOC)
#define KMC_MAX (1 << KMC_BIT_MAX)
#define KMC_REAP_CHUNK INT_MAX
#define KMC_DEFAULT_SEEKS 1
#define KMC_RECLAIM_ONCE 0x1 /* Force a single shrinker pass */
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;
#define SKM_MAGIC 0x2e2e2e2e
#define SKO_MAGIC 0x20202020
#define SKS_MAGIC 0x22222222
#define SKC_MAGIC 0x2c2c2c2c
#define SPL_KMEM_CACHE_OBJ_PER_SLAB 8 /* Target objects per slab */
#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
#ifdef _LP64
#define SPL_KMEM_CACHE_MAX_SIZE 32 /* Max slab size in MB */
#else
#define SPL_KMEM_CACHE_MAX_SIZE 4 /* Max slab size in MB */
#endif
#define SPL_MAX_ORDER (MAX_ORDER - 3)
#define SPL_MAX_ORDER_NR_PAGES (1 << (SPL_MAX_ORDER - 1))
#ifdef CONFIG_SLUB
#define SPL_MAX_KMEM_CACHE_ORDER PAGE_ALLOC_COSTLY_ORDER
#define SPL_MAX_KMEM_ORDER_NR_PAGES (1 << (SPL_MAX_KMEM_CACHE_ORDER - 1))
#else
#define SPL_MAX_KMEM_ORDER_NR_PAGES (KMALLOC_MAX_SIZE >> PAGE_SHIFT)
#endif
typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef struct spl_kmem_magazine {
uint32_t skm_magic; /* Sanity magic */
uint32_t skm_avail; /* Available objects */
uint32_t skm_size; /* Magazine size */
uint32_t skm_refill; /* Batch refill size */
struct spl_kmem_cache *skm_cache; /* Owned by cache */
unsigned int skm_cpu; /* Owned by cpu */
void *skm_objs[0]; /* Object pointers */
} spl_kmem_magazine_t;
typedef struct spl_kmem_obj {
uint32_t sko_magic; /* Sanity magic */
void *sko_addr; /* Buffer address */
struct spl_kmem_slab *sko_slab; /* Owned by slab */
struct list_head sko_list; /* Free object list linkage */
} spl_kmem_obj_t;
typedef struct spl_kmem_slab {
uint32_t sks_magic; /* Sanity magic */
uint32_t sks_objs; /* Objects per slab */
struct spl_kmem_cache *sks_cache; /* Owned by cache */
struct list_head sks_list; /* Slab list linkage */
struct list_head sks_free_list; /* Free object list */
unsigned long sks_age; /* Last modify jiffie */
uint32_t sks_ref; /* Ref count used objects */
} spl_kmem_slab_t;
typedef struct spl_kmem_alloc {
struct spl_kmem_cache *ska_cache; /* Owned by cache */
int ska_flags; /* Allocation flags */
taskq_ent_t ska_tqe; /* Task queue entry */
} spl_kmem_alloc_t;
typedef struct spl_kmem_emergency {
struct rb_node ske_node; /* Emergency tree linkage */
unsigned long ske_obj; /* Buffer address */
} spl_kmem_emergency_t;
typedef struct spl_kmem_cache {
uint32_t skc_magic; /* Sanity magic */
uint32_t skc_name_size; /* Name length */
char *skc_name; /* Name string */
spl_kmem_magazine_t **skc_mag; /* Per-CPU warm cache */
uint32_t skc_mag_size; /* Magazine size */
uint32_t skc_mag_refill; /* Magazine refill count */
spl_kmem_ctor_t skc_ctor; /* Constructor */
spl_kmem_dtor_t skc_dtor; /* Destructor */
void *skc_private; /* Private data */
void *skc_vmp; /* Unused */
struct kmem_cache *skc_linux_cache; /* Linux slab cache if used */
unsigned long skc_flags; /* Flags */
uint32_t skc_obj_size; /* Object size */
uint32_t skc_obj_align; /* Object alignment */
uint32_t skc_slab_objs; /* Objects per slab */
uint32_t skc_slab_size; /* Slab size */
atomic_t skc_ref; /* Ref count callers */
taskqid_t skc_taskqid; /* Slab reclaim task */
struct list_head skc_list; /* List of caches linkage */
struct list_head skc_complete_list; /* Completely alloc'ed */
struct list_head skc_partial_list; /* Partially alloc'ed */
struct rb_root skc_emergency_tree; /* Min sized objects */
spinlock_t skc_lock; /* Cache lock */
spl_wait_queue_head_t skc_waitq; /* Allocation waiters */
uint64_t skc_slab_fail; /* Slab alloc failures */
uint64_t skc_slab_create; /* Slab creates */
uint64_t skc_slab_destroy; /* Slab destroys */
uint64_t skc_slab_total; /* Slab total current */
uint64_t skc_slab_alloc; /* Slab alloc current */
uint64_t skc_slab_max; /* Slab max historic */
uint64_t skc_obj_total; /* Obj total current */
uint64_t skc_obj_alloc; /* Obj alloc current */
struct percpu_counter skc_linux_alloc; /* Linux-backed Obj alloc */
uint64_t skc_obj_max; /* Obj max historic */
uint64_t skc_obj_deadlock; /* Obj emergency deadlocks */
uint64_t skc_obj_emergency; /* Obj emergency current */
uint64_t skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;
#define kmem_cache_t spl_kmem_cache_t
extern spl_kmem_cache_t *spl_kmem_cache_create(const char *name, size_t size,
size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
void *reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move)
#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc)
+/*
+ * This is necessary for compatibility with other kernel modules and
+ * in-tree filesystems that may define kmem_cache_alloc as a macro,
+ * as bcachefs does now.
+ */
+#ifdef kmem_cache_alloc
+#undef kmem_cache_alloc
+#endif
#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc) spl_kmem_cache_reap_now(skc)
#define kmem_reap() spl_kmem_reap()
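/*
 * Usage sketch (illustrative): consumers use the Solaris-style names
 * mapped above. The object type obj_t is hypothetical, and KM_SLEEP
 * stands in for an allocation flag from sys/kmem.h:
 *
 *	kmem_cache_t *kc = kmem_cache_create("example", sizeof (obj_t),
 *	    0, NULL, NULL, NULL, NULL, NULL, 0);
 *	obj_t *o = kmem_cache_alloc(kc, KM_SLEEP);
 *	kmem_cache_free(kc, o);
 *	kmem_cache_destroy(kc);
 */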
/*
* The following functions are only available for internal use.
*/
extern int spl_kmem_cache_init(void);
extern void spl_kmem_cache_fini(void);
#endif /* _SPL_KMEM_CACHE_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/types.h b/sys/contrib/openzfs/include/os/linux/spl/sys/types.h
index a7666187ec23..d89a91c36f92 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/types.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/types.h
@@ -1,73 +1,73 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_TYPES_H
#define _SPL_TYPES_H
#include <linux/types.h>
typedef enum {
B_FALSE = 0,
B_TRUE = 1
} boolean_t;
typedef unsigned char uchar_t;
typedef unsigned short ushort_t;
typedef unsigned int uint_t;
typedef unsigned long ulong_t;
typedef unsigned long long u_longlong_t;
typedef long long longlong_t;
-typedef unsigned long intptr_t;
+typedef long intptr_t;
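+/* intptr_t must be a signed type; the unsigned counterpart is uintptr_t. */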
typedef unsigned long long rlim64_t;
typedef struct task_struct kthread_t;
typedef struct task_struct proc_t;
typedef int id_t;
typedef short pri_t;
typedef short index_t;
typedef longlong_t offset_t;
typedef u_longlong_t u_offset_t;
typedef ulong_t pgcnt_t;
typedef int major_t;
typedef int minor_t;
struct user_namespace;
#ifdef HAVE_IOPS_CREATE_IDMAP
#include <linux/refcount.h>
struct mnt_idmap {
struct user_namespace *owner;
refcount_t count;
};
typedef struct mnt_idmap zidmap_t;
#define idmap_owner(p) (((struct mnt_idmap *)p)->owner)
#else
typedef struct user_namespace zidmap_t;
#define idmap_owner(p) ((struct user_namespace *)p)
#endif
extern zidmap_t *zfs_init_idmap;
#endif /* _SPL_TYPES_H */
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
index 2b302e9dab07..0bd20f64897d 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
@@ -1,213 +1,266 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
*/
#ifndef _SYS_ZPL_H
#define _SYS_ZPL_H
#include <sys/mntent.h>
#include <sys/vfs.h>
#include <linux/aio.h>
#include <linux/dcache_compat.h>
#include <linux/exportfs.h>
#include <linux/falloc.h>
#include <linux/parser.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/vfs_compat.h>
#include <linux/writeback.h>
#include <linux/xattr_compat.h>
/* zpl_inode.c */
extern void zpl_vap_init(vattr_t *vap, struct inode *dir,
umode_t mode, cred_t *cr, zidmap_t *mnt_ns);
extern const struct inode_operations zpl_inode_operations;
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
extern const struct inode_operations_wrapper zpl_dir_inode_operations;
#else
extern const struct inode_operations zpl_dir_inode_operations;
#endif
extern const struct inode_operations zpl_symlink_inode_operations;
extern const struct inode_operations zpl_special_inode_operations;
/* zpl_file.c */
extern const struct address_space_operations zpl_address_space_operations;
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+extern const struct file_operations_extend zpl_file_operations;
+#else
extern const struct file_operations zpl_file_operations;
+#endif
extern const struct file_operations zpl_dir_file_operations;
/* zpl_super.c */
extern void zpl_prune_sb(int64_t nr_to_scan, void *arg);
extern const struct super_operations zpl_super_operations;
extern const struct export_operations zpl_export_operations;
extern struct file_system_type zpl_fs_type;
/* zpl_xattr.c */
extern ssize_t zpl_xattr_list(struct dentry *dentry, char *buf, size_t size);
extern int zpl_xattr_security_init(struct inode *ip, struct inode *dip,
const struct qstr *qstr);
#if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL)
#if defined(HAVE_SET_ACL_IDMAP_DENTRY)
extern int zpl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
#elif defined(HAVE_SET_ACL_USERNS)
extern int zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type);
#elif defined(HAVE_SET_ACL_USERNS_DENTRY_ARG2)
extern int zpl_set_acl(struct user_namespace *userns, struct dentry *dentry,
struct posix_acl *acl, int type);
#else
extern int zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type);
#endif /* HAVE_SET_ACL_USERNS */
#endif /* HAVE_SET_ACL */
#if defined(HAVE_GET_ACL_RCU) || defined(HAVE_GET_INODE_ACL)
extern struct posix_acl *zpl_get_acl(struct inode *ip, int type, bool rcu);
#elif defined(HAVE_GET_ACL)
extern struct posix_acl *zpl_get_acl(struct inode *ip, int type);
#endif
extern int zpl_init_acl(struct inode *ip, struct inode *dir);
extern int zpl_chmod_acl(struct inode *ip);
#else
static inline int
zpl_init_acl(struct inode *ip, struct inode *dir)
{
return (0);
}
static inline int
zpl_chmod_acl(struct inode *ip)
{
return (0);
}
#endif /* CONFIG_FS_POSIX_ACL */
extern xattr_handler_t *zpl_xattr_handlers[];
/* zpl_ctldir.c */
extern const struct file_operations zpl_fops_root;
extern const struct inode_operations zpl_ops_root;
extern const struct file_operations zpl_fops_snapdir;
extern const struct inode_operations zpl_ops_snapdir;
extern const struct file_operations zpl_fops_shares;
extern const struct inode_operations zpl_ops_shares;
#if defined(HAVE_VFS_ITERATE) || defined(HAVE_VFS_ITERATE_SHARED)
#define ZPL_DIR_CONTEXT_INIT(_dirent, _actor, _pos) { \
.actor = _actor, \
.pos = _pos, \
}
typedef struct dir_context zpl_dir_context_t;
#define zpl_dir_emit dir_emit
#define zpl_dir_emit_dot dir_emit_dot
#define zpl_dir_emit_dotdot dir_emit_dotdot
#define zpl_dir_emit_dots dir_emit_dots
#else
typedef struct zpl_dir_context {
void *dirent;
const filldir_t actor;
loff_t pos;
} zpl_dir_context_t;
#define ZPL_DIR_CONTEXT_INIT(_dirent, _actor, _pos) { \
.dirent = _dirent, \
.actor = _actor, \
.pos = _pos, \
}
static inline bool
zpl_dir_emit(zpl_dir_context_t *ctx, const char *name, int namelen,
uint64_t ino, unsigned type)
{
return (!ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type));
}
static inline bool
zpl_dir_emit_dot(struct file *file, zpl_dir_context_t *ctx)
{
return (ctx->actor(ctx->dirent, ".", 1, ctx->pos,
file_inode(file)->i_ino, DT_DIR) == 0);
}
static inline bool
zpl_dir_emit_dotdot(struct file *file, zpl_dir_context_t *ctx)
{
return (ctx->actor(ctx->dirent, "..", 2, ctx->pos,
parent_ino(file_dentry(file)), DT_DIR) == 0);
}
static inline bool
zpl_dir_emit_dots(struct file *file, zpl_dir_context_t *ctx)
{
if (ctx->pos == 0) {
if (!zpl_dir_emit_dot(file, ctx))
return (false);
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!zpl_dir_emit_dotdot(file, ctx))
return (false);
ctx->pos = 2;
}
return (true);
}
#endif /* HAVE_VFS_ITERATE */
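/*
 * Usage sketch (illustrative): a directory iterator emits the dot
 * entries first and then regular entries until the VFS declines one:
 *
 *	if (!zpl_dir_emit_dots(filp, ctx))
 *		return (0);
 *	while (...more directory entries...) {
 *		if (!zpl_dir_emit(ctx, name, strlen(name), objnum, type))
 *			break;
 *		ctx->pos++;
 *	}
 */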
+
+/* zpl_file_range.c */
+
+/* handlers for file_operations of the same name */
+extern ssize_t zpl_copy_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, size_t len, unsigned int flags);
+extern loff_t zpl_remap_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, loff_t len, unsigned int flags);
+extern int zpl_clone_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, uint64_t len);
+extern int zpl_dedupe_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, uint64_t len);
+
+/* compat for FICLONE/FICLONERANGE/FIDEDUPERANGE ioctls */
+typedef struct {
+ int64_t fcr_src_fd;
+ uint64_t fcr_src_offset;
+ uint64_t fcr_src_length;
+ uint64_t fcr_dest_offset;
+} zfs_ioc_compat_file_clone_range_t;
+
+typedef struct {
+ int64_t fdri_dest_fd;
+ uint64_t fdri_dest_offset;
+ uint64_t fdri_bytes_deduped;
+ int32_t fdri_status;
+ uint32_t fdri_reserved;
+} zfs_ioc_compat_dedupe_range_info_t;
+
+typedef struct {
+ uint64_t fdr_src_offset;
+ uint64_t fdr_src_length;
+ uint16_t fdr_dest_count;
+ uint16_t fdr_reserved1;
+ uint32_t fdr_reserved2;
+ zfs_ioc_compat_dedupe_range_info_t fdr_info[];
+} zfs_ioc_compat_dedupe_range_t;
+
+#define ZFS_IOC_COMPAT_FICLONE _IOW(0x94, 9, int)
+#define ZFS_IOC_COMPAT_FICLONERANGE \
+ _IOW(0x94, 13, zfs_ioc_compat_file_clone_range_t)
+#define ZFS_IOC_COMPAT_FIDEDUPERANGE \
+ _IOWR(0x94, 54, zfs_ioc_compat_dedupe_range_t)
+
+extern long zpl_ioctl_ficlone(struct file *filp, void *arg);
+extern long zpl_ioctl_ficlonerange(struct file *filp, void *arg);
+extern long zpl_ioctl_fideduperange(struct file *filp, void *arg);
+
+
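+/*
+ * Dispatch sketch (illustrative): an ioctl handler routes the compat
+ * requests to the handlers declared above, e.g.:
+ *
+ *	case ZFS_IOC_COMPAT_FICLONE:
+ *		return (zpl_ioctl_ficlone(filp, (void *)arg));
+ *	case ZFS_IOC_COMPAT_FICLONERANGE:
+ *		return (zpl_ioctl_ficlonerange(filp, (void *)arg));
+ *	case ZFS_IOC_COMPAT_FIDEDUPERANGE:
+ *		return (zpl_ioctl_fideduperange(filp, (void *)arg));
+ */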
#if defined(HAVE_INODE_TIMESTAMP_TRUNCATE)
#define zpl_inode_timestamp_truncate(ts, ip) timestamp_truncate(ts, ip)
#elif defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zpl_inode_timestamp_truncate(ts, ip) \
timespec64_trunc(ts, (ip)->i_sb->s_time_gran)
#else
#define zpl_inode_timestamp_truncate(ts, ip) \
timespec_trunc(ts, (ip)->i_sb->s_time_gran)
#endif
#if defined(HAVE_INODE_OWNER_OR_CAPABLE)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_USERNS)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ns, ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_IDMAP)
#define zpl_inode_owner_or_capable(idmap, ip) inode_owner_or_capable(idmap, ip)
#else
#error "Unsupported kernel"
#endif
#if defined(HAVE_SETATTR_PREPARE_USERNS) || defined(HAVE_SETATTR_PREPARE_IDMAP)
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(ns, dentry, ia)
#else
/*
* Use kernel-provided version, or our own from
* linux/vfs_compat.h
*/
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(dentry, ia)
#endif
#endif /* _SYS_ZPL_H */
diff --git a/sys/contrib/openzfs/include/sys/bpobj.h b/sys/contrib/openzfs/include/sys/bpobj.h
index f3384f526454..81bc0fe21086 100644
--- a/sys/contrib/openzfs/include/sys/bpobj.h
+++ b/sys/contrib/openzfs/include/sys/bpobj.h
@@ -1,107 +1,107 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_BPOBJ_H
#define _SYS_BPOBJ_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/bplist.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct bpobj_phys {
/*
* This is the bonus buffer for the dead lists. The object's
* contents are an array of bpo_entries blkptr_t's, representing
* a total of bpo_bytes physical space.
*/
uint64_t bpo_num_blkptrs;
uint64_t bpo_bytes;
uint64_t bpo_comp;
uint64_t bpo_uncomp;
uint64_t bpo_subobjs;
uint64_t bpo_num_subobjs;
uint64_t bpo_num_freed;
} bpobj_phys_t;
#define BPOBJ_SIZE_V0 (2 * sizeof (uint64_t))
#define BPOBJ_SIZE_V1 (4 * sizeof (uint64_t))
#define BPOBJ_SIZE_V2 (6 * sizeof (uint64_t))
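/*
 * Annotation (not in the original): the V0/V1/V2 sizes correspond to the
 * first 2, 4, and 6 uint64_t fields of bpobj_phys_t above; bpo_num_freed
 * is only present at the current full size.
 */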
typedef struct bpobj {
kmutex_t bpo_lock;
objset_t *bpo_os;
uint64_t bpo_object;
- int bpo_epb;
+ uint32_t bpo_epb;
uint8_t bpo_havecomp;
uint8_t bpo_havesubobj;
uint8_t bpo_havefreed;
bpobj_phys_t *bpo_phys;
dmu_buf_t *bpo_dbuf;
dmu_buf_t *bpo_cached_dbuf;
} bpobj_t;
typedef int bpobj_itor_t(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx);
uint64_t bpobj_alloc(objset_t *mos, int blocksize, dmu_tx_t *tx);
uint64_t bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx);
void bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx);
void bpobj_decr_empty(objset_t *os, dmu_tx_t *tx);
int bpobj_open(bpobj_t *bpo, objset_t *mos, uint64_t object);
void bpobj_close(bpobj_t *bpo);
boolean_t bpobj_is_open(const bpobj_t *bpo);
int bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx);
int bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *, uint64_t *);
int livelist_bpobj_iterate_from_nofree(bpobj_t *bpo, bpobj_itor_t func,
void *arg, int64_t start);
void bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx);
void bpobj_prefetch_subobj(bpobj_t *bpo, uint64_t subobj);
void bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx);
int bpobj_space(bpobj_t *bpo,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
boolean_t bpobj_is_empty(bpobj_t *bpo);
int bplist_append_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_BPOBJ_H */
diff --git a/sys/contrib/openzfs/include/sys/brt.h b/sys/contrib/openzfs/include/sys/brt.h
index 0761159e3f5f..f73df95058d9 100644
--- a/sys/contrib/openzfs/include/sys/brt.h
+++ b/sys/contrib/openzfs/include/sys/brt.h
@@ -1,62 +1,63 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2020, 2021, 2022 by Pawel Jakub Dawidek
*/
#ifndef _SYS_BRT_H
#define _SYS_BRT_H
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#ifdef __cplusplus
extern "C" {
#endif
extern boolean_t brt_entry_decref(spa_t *spa, const blkptr_t *bp);
+extern uint64_t brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp);
extern uint64_t brt_get_dspace(spa_t *spa);
extern uint64_t brt_get_used(spa_t *spa);
extern uint64_t brt_get_saved(spa_t *spa);
extern uint64_t brt_get_ratio(spa_t *spa);
extern boolean_t brt_maybe_exists(spa_t *spa, const blkptr_t *bp);
extern void brt_init(void);
extern void brt_fini(void);
extern void brt_pending_add(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx);
extern void brt_pending_remove(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx);
extern void brt_pending_apply(spa_t *spa, uint64_t txg);
extern void brt_create(spa_t *spa);
extern int brt_load(spa_t *spa);
extern void brt_unload(spa_t *spa);
extern void brt_sync(spa_t *spa, uint64_t txg);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_BRT_H */
diff --git a/sys/contrib/openzfs/include/sys/dmu.h b/sys/contrib/openzfs/include/sys/dmu.h
index 7e57d133c2ec..615ba8fe7496 100644
--- a/sys/contrib/openzfs/include/sys/dmu.h
+++ b/sys/contrib/openzfs/include/sys/dmu.h
@@ -1,1098 +1,1102 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_DMU_H
#define _SYS_DMU_H
/*
* This file describes the interface that the DMU provides for its
* consumers.
*
* The DMU also interacts with the SPA. That interface is described in
* dmu_spa.h.
*/
#include <sys/zfs_context.h>
#include <sys/inttypes.h>
#include <sys/cred.h>
#include <sys/fs/zfs.h>
#include <sys/zio_compress.h>
#include <sys/zio_priority.h>
#include <sys/uio.h>
#include <sys/zfs_file.h>
#ifdef __cplusplus
extern "C" {
#endif
struct page;
struct vnode;
struct spa;
struct zilog;
struct zio;
struct blkptr;
struct zap_cursor;
struct dsl_dataset;
struct dsl_pool;
struct dnode;
struct drr_begin;
struct drr_end;
struct zbookmark_phys;
struct spa;
struct nvlist;
struct arc_buf;
struct zio_prop;
struct sa_handle;
struct dsl_crypto_params;
struct locked_range;
typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;
typedef struct dnode dnode_t;
typedef enum dmu_object_byteswap {
DMU_BSWAP_UINT8,
DMU_BSWAP_UINT16,
DMU_BSWAP_UINT32,
DMU_BSWAP_UINT64,
DMU_BSWAP_ZAP,
DMU_BSWAP_DNODE,
DMU_BSWAP_OBJSET,
DMU_BSWAP_ZNODE,
DMU_BSWAP_OLDACL,
DMU_BSWAP_ACL,
/*
* Allocating a new byteswap type number makes the on-disk format
* incompatible with any other format that uses the same number.
*
* Data can usually be structured to work with one of the
* DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
*/
DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;
#define DMU_OT_NEWTYPE 0x80
#define DMU_OT_METADATA 0x40
#define DMU_OT_ENCRYPTED 0x20
#define DMU_OT_BYTESWAP_MASK 0x1f
/*
* Defines a uint8_t object type. Object types specify if the data
* in the object is metadata (boolean) and how to byteswap the data
* (dmu_object_byteswap_t). All of the types created by this method
* are cached in the dbuf metadata cache.
*/
#define DMU_OT(byteswap, metadata, encrypted) \
(DMU_OT_NEWTYPE | \
((metadata) ? DMU_OT_METADATA : 0) | \
((encrypted) ? DMU_OT_ENCRYPTED : 0) | \
((byteswap) & DMU_OT_BYTESWAP_MASK))
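/*
 * Worked example (illustrative): DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_FALSE)
 * evaluates to 0x80 | 0x40 | 0x04 = 0xc4, which is the
 * DMU_OTN_ZAP_METADATA value defined in the enum below.
 */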
#define DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
(ot) < DMU_OT_NUMTYPES)
#define DMU_OT_IS_METADATA_CACHED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
B_TRUE : dmu_ot[(ot)].ot_dbuf_metadata_cache)
/*
* MDB doesn't have dmu_ot; it defines these macros itself.
*/
#ifndef ZFS_MDB
#define DMU_OT_IS_METADATA_IMPL(ot) (dmu_ot[ot].ot_metadata)
#define DMU_OT_IS_ENCRYPTED_IMPL(ot) (dmu_ot[ot].ot_encrypt)
#define DMU_OT_BYTESWAP_IMPL(ot) (dmu_ot[ot].ot_byteswap)
#endif
#define DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
(((ot) & DMU_OT_METADATA) != 0) : \
DMU_OT_IS_METADATA_IMPL(ot))
#define DMU_OT_IS_DDT(ot) \
((ot) == DMU_OT_DDT_ZAP)
#define DMU_OT_IS_CRITICAL(ot) \
(DMU_OT_IS_METADATA(ot) && \
(ot) != DMU_OT_DNODE && \
(ot) != DMU_OT_DIRECTORY_CONTENTS && \
(ot) != DMU_OT_SA)
/* Note: ztest uses DMU_OT_UINT64_OTHER as a proxy for file blocks */
#define DMU_OT_IS_FILE(ot) \
((ot) == DMU_OT_PLAIN_FILE_CONTENTS || (ot) == DMU_OT_UINT64_OTHER)
#define DMU_OT_IS_ENCRYPTED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
(((ot) & DMU_OT_ENCRYPTED) != 0) : \
DMU_OT_IS_ENCRYPTED_IMPL(ot))
/*
* These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
* have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
* is repurposed for embedded BPs.
*/
#define DMU_OT_HAS_FILL(ot) \
((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)
#define DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_BYTESWAP_MASK) : \
DMU_OT_BYTESWAP_IMPL(ot))
typedef enum dmu_object_type {
DMU_OT_NONE,
/* general: */
DMU_OT_OBJECT_DIRECTORY, /* ZAP */
DMU_OT_OBJECT_ARRAY, /* UINT64 */
DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */
DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */
DMU_OT_BPOBJ, /* UINT64 */
DMU_OT_BPOBJ_HDR, /* UINT64 */
/* spa: */
DMU_OT_SPACE_MAP_HEADER, /* UINT64 */
DMU_OT_SPACE_MAP, /* UINT64 */
/* zil: */
DMU_OT_INTENT_LOG, /* UINT64 */
/* dmu: */
DMU_OT_DNODE, /* DNODE */
DMU_OT_OBJSET, /* OBJSET */
/* dsl: */
DMU_OT_DSL_DIR, /* UINT64 */
DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */
DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */
DMU_OT_DSL_PROPS, /* ZAP */
DMU_OT_DSL_DATASET, /* UINT64 */
/* zpl: */
DMU_OT_ZNODE, /* ZNODE */
DMU_OT_OLDACL, /* Old ACL */
DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */
DMU_OT_DIRECTORY_CONTENTS, /* ZAP */
DMU_OT_MASTER_NODE, /* ZAP */
DMU_OT_UNLINKED_SET, /* ZAP */
/* zvol: */
DMU_OT_ZVOL, /* UINT8 */
DMU_OT_ZVOL_PROP, /* ZAP */
/* other; for testing only! */
DMU_OT_PLAIN_OTHER, /* UINT8 */
DMU_OT_UINT64_OTHER, /* UINT64 */
DMU_OT_ZAP_OTHER, /* ZAP */
/* new object types: */
DMU_OT_ERROR_LOG, /* ZAP */
DMU_OT_SPA_HISTORY, /* UINT8 */
DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */
DMU_OT_POOL_PROPS, /* ZAP */
DMU_OT_DSL_PERMS, /* ZAP */
DMU_OT_ACL, /* ACL */
DMU_OT_SYSACL, /* SYSACL */
DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */
DMU_OT_FUID_SIZE, /* FUID table size UINT64 */
DMU_OT_NEXT_CLONES, /* ZAP */
DMU_OT_SCAN_QUEUE, /* ZAP */
DMU_OT_USERGROUP_USED, /* ZAP */
DMU_OT_USERGROUP_QUOTA, /* ZAP */
DMU_OT_USERREFS, /* ZAP */
DMU_OT_DDT_ZAP, /* ZAP */
DMU_OT_DDT_STATS, /* ZAP */
DMU_OT_SA, /* System attr */
DMU_OT_SA_MASTER_NODE, /* ZAP */
DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */
DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */
DMU_OT_SCAN_XLATE, /* ZAP */
DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */
DMU_OT_DEADLIST, /* ZAP */
DMU_OT_DEADLIST_HDR, /* UINT64 */
DMU_OT_DSL_CLONES, /* ZAP */
DMU_OT_BPOBJ_SUBOBJ, /* UINT64 */
/*
* Do not allocate new object types here. Doing so makes the on-disk
* format incompatible with any other format that uses the same object
* type number.
*
* When creating an object which does not have one of the above types
* use the DMU_OTN_* type with the correct byteswap and metadata
* values.
*
* The DMU_OTN_* types do not have entries in the dmu_ot table,
* use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
* of indexing into dmu_ot directly (this works for both DMU_OT_* types
* and DMU_OTN_* types).
*/
DMU_OT_NUMTYPES,
/*
* Names for valid types declared with DMU_OT().
*/
DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_FALSE),
DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_FALSE),
DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_FALSE),
DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_FALSE),
DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_FALSE),
DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_FALSE),
DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_FALSE),
DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_FALSE),
DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_FALSE),
DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_FALSE),
DMU_OTN_UINT8_ENC_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_TRUE),
DMU_OTN_UINT8_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_TRUE),
DMU_OTN_UINT16_ENC_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_TRUE),
DMU_OTN_UINT16_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_TRUE),
DMU_OTN_UINT32_ENC_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_TRUE),
DMU_OTN_UINT32_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_TRUE),
DMU_OTN_UINT64_ENC_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_TRUE),
DMU_OTN_UINT64_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_TRUE),
DMU_OTN_ZAP_ENC_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_TRUE),
DMU_OTN_ZAP_ENC_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_TRUE),
} dmu_object_type_t;
/*
* These flags are intended to be used to specify the "txg_how"
* parameter when calling the dmu_tx_assign() function. See the comment
* above dmu_tx_assign() for more details on the meaning of these flags.
*/
#define TXG_NOWAIT (0ULL)
#define TXG_WAIT (1ULL<<0)
#define TXG_NOTHROTTLE (1ULL<<1)
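/*
 * Usage sketch (illustrative): a typical write path creates a tx, holds
 * the affected range, and assigns it with TXG_WAIT:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	...modify the object...
 *	dmu_tx_commit(tx);
 */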
void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);
#define DS_FIND_SNAPSHOTS (1<<0)
#define DS_FIND_CHILDREN (1<<1)
#define DS_FIND_SERIALIZE (1<<2)
/*
* The maximum number of bytes that can be accessed as part of one
* operation, including metadata.
*/
#define DMU_MAX_ACCESS (64 * 1024 * 1024) /* 64MB */
#define DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */
#define DMU_USERUSED_OBJECT (-1ULL)
#define DMU_GROUPUSED_OBJECT (-2ULL)
#define DMU_PROJECTUSED_OBJECT (-3ULL)
/*
* Zap prefix for object accounting in DMU_{USER,GROUP,PROJECT}USED_OBJECT.
*/
#define DMU_OBJACCT_PREFIX "obj-"
#define DMU_OBJACCT_PREFIX_LEN 4
/*
* artificial blkids for bonus buffer and spill blocks
*/
#define DMU_BONUS_BLKID (-1ULL)
#define DMU_SPILL_BLKID (-2ULL)
/*
* Public routines to create, destroy, open, and close objsets.
*/
typedef void dmu_objset_create_sync_func_t(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
int dmu_objset_hold(const char *name, const void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t key_required, const void *tag,
objset_t **osp);
void dmu_objset_rele(objset_t *os, const void *tag);
void dmu_objset_disown(objset_t *os, boolean_t key_required, const void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);
void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
struct dsl_crypto_params *dcp, dmu_objset_create_sync_func_t func,
void *arg);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_find(const char *name, int func(const char *, void *), void *arg,
int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive);
typedef struct dmu_buf {
uint64_t db_object; /* object that this buffer is part of */
uint64_t db_offset; /* byte offset in this object */
uint64_t db_size; /* size of buffer in bytes */
void *db_data; /* data in buffer */
} dmu_buf_t;
/*
* The names of zap entries in the DIRECTORY_OBJECT of the MOS.
*/
#define DMU_POOL_DIRECTORY_OBJECT 1
#define DMU_POOL_CONFIG "config"
#define DMU_POOL_FEATURES_FOR_WRITE "features_for_write"
#define DMU_POOL_FEATURES_FOR_READ "features_for_read"
#define DMU_POOL_FEATURE_DESCRIPTIONS "feature_descriptions"
#define DMU_POOL_FEATURE_ENABLED_TXG "feature_enabled_txg"
#define DMU_POOL_ROOT_DATASET "root_dataset"
#define DMU_POOL_SYNC_BPOBJ "sync_bplist"
#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub"
#define DMU_POOL_ERRLOG_LAST "errlog_last"
#define DMU_POOL_SPARES "spares"
#define DMU_POOL_DEFLATE "deflate"
#define DMU_POOL_HISTORY "history"
#define DMU_POOL_PROPS "pool_props"
#define DMU_POOL_L2CACHE "l2cache"
#define DMU_POOL_TMP_USERREFS "tmp_userrefs"
#define DMU_POOL_DDT "DDT-%s-%s-%s"
#define DMU_POOL_DDT_STATS "DDT-statistics"
#define DMU_POOL_CREATION_VERSION "creation_version"
#define DMU_POOL_SCAN "scan"
#define DMU_POOL_ERRORSCRUB "error_scrub"
#define DMU_POOL_FREE_BPOBJ "free_bpobj"
#define DMU_POOL_BPTREE_OBJ "bptree_obj"
#define DMU_POOL_EMPTY_BPOBJ "empty_bpobj"
#define DMU_POOL_CHECKSUM_SALT "org.illumos:checksum_salt"
#define DMU_POOL_VDEV_ZAP_MAP "com.delphix:vdev_zap_map"
#define DMU_POOL_REMOVING "com.delphix:removing"
#define DMU_POOL_OBSOLETE_BPOBJ "com.delphix:obsolete_bpobj"
#define DMU_POOL_CONDENSING_INDIRECT "com.delphix:condensing_indirect"
#define DMU_POOL_ZPOOL_CHECKPOINT "com.delphix:zpool_checkpoint"
#define DMU_POOL_LOG_SPACEMAP_ZAP "com.delphix:log_spacemap_zap"
#define DMU_POOL_DELETED_CLONES "com.delphix:deleted_clones"
/*
* Allocate an object from this objset. The range of object numbers
* available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode.
*
* The transaction must be assigned to a txg. The newly allocated
* object will be "held" in the transaction (ie. you can modify the
* newly allocated object in this transaction).
*
* dmu_object_alloc() chooses an object and returns it in *objectp.
*
* dmu_object_claim() allocates a specific object number. If that
* number is already allocated, it fails and returns EEXIST.
*
* Return 0 on success, or ENOSPC or EEXIST as specified above.
*/
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
uint64_t dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
uint64_t dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len,
int dnodesize, dmu_tx_t *tx);
uint64_t dmu_object_alloc_hold(objset_t *os, dmu_object_type_t ot,
int blocksize, int indirect_blockshift, dmu_object_type_t bonustype,
int bonuslen, int dnodesize, dnode_t **allocated_dnode, const void *tag,
dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len,
int dnodesize, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
int dmu_object_reclaim_dnsize(objset_t *os, uint64_t object,
dmu_object_type_t ot, int blocksize, dmu_object_type_t bonustype,
int bonuslen, int dnodesize, boolean_t keep_spill, dmu_tx_t *tx);
int dmu_object_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx);
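/*
 * Illustrative usage sketch (assuming "os" is a held objset and "tx"
 * has already been assigned to a txg; error handling elided). The
 * first call lets the DMU pick the object number; the second claims
 * a specific number and fails with EEXIST if it is already taken:
 *
 *	uint64_t obj;
 *	int error;
 *
 *	obj = dmu_object_alloc(os, DMU_OT_PLAIN_FILE_CONTENTS, 0,
 *	    DMU_OT_NONE, 0, tx);
 *	error = dmu_object_claim(os, 123, DMU_OT_PLAIN_FILE_CONTENTS, 0,
 *	    DMU_OT_NONE, 0, tx);
 */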
/*
* Free an object from this objset.
*
* The object's data will be freed as well (ie. you don't need to call
* dmu_free_range(os, object, 0, DMU_OBJECT_END, tx)).
*
* The object need not be held in the transaction.
*
* If there are any holds on this object's buffers (via dmu_buf_hold()),
* or tx holds on the object (via dmu_tx_hold_object()), you cannot
* free it; it fails and returns EBUSY.
*
* If the object is not allocated, it fails and returns ENOENT.
*
* Return 0 on success, or EBUSY or ENOENT as specified above.
*/
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);
/*
* Find the next allocated or free object.
*
* The objectp parameter is in-out. It will be updated to be the next
* allocated object (or the next free object, if hole is set). Objects
* which have not been modified since txg are ignored.
*
* XXX Can only be called on an objset with no dirty data.
*
* Returns 0 on success, or ENOENT if there are no more objects.
*/
int dmu_object_next(objset_t *os, uint64_t *objectp,
boolean_t hole, uint64_t txg);
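/*
 * Illustrative usage sketch: visiting every allocated object in an
 * objset, following the pattern used by zdb. Passing txg 0 means no
 * objects are ignored; the first pass sees object 0, the meta-dnode.
 * visit_object() is a hypothetical consumer:
 *
 *	uint64_t obj = 0;
 *	int error;
 *
 *	for (error = 0; error == 0;
 *	    error = dmu_object_next(os, &obj, B_FALSE, 0))
 *		visit_object(os, obj);
 */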
/*
* Set the number of levels on a dnode. nlevels must be greater than the
* current number of levels, or EINVAL will be returned.
*/
int dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels,
dmu_tx_t *tx);
/*
* Set the data blocksize for an object.
*
* The object cannot have any blocks allocated beyond the first. If
* the first block is allocated already, the new size must be greater
* than the current block size. If these conditions are not met,
* ENOTSUP will be returned.
*
* Returns 0 on success, or EBUSY if there are any holds on the object
* contents, or ENOTSUP as described above.
*/
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
int ibs, dmu_tx_t *tx);
/*
* Manually set the maxblkid on a dnode. This will adjust nlevels accordingly
* to accommodate the change. When calling this function, the caller must
* ensure that the object's nlevels can sufficiently support the new maxblkid.
*/
int dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx);
/*
* Set the checksum property on a dnode. The new checksum algorithm will
* apply to all newly written blocks; existing blocks will not be affected.
*/
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx);
/*
* Set the compress property on a dnode. The new compression algorithm will
* apply to all newly written blocks; existing blocks will not be affected.
*/
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx);
void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx);
void dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
/*
* Decide how to write a block: checksum, compression, number of copies, etc.
*/
#define WP_NOFILL 0x1
#define WP_DMU_SYNC 0x2
#define WP_SPILL 0x4
void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
struct zio_prop *zp);
/*
* The bonus data is accessed more or less like a regular buffer.
* You must dmu_bonus_hold() to get the buffer, which will give you a
* dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
* data. As with any normal buffer, you must call dmu_buf_will_dirty()
* before modifying it, and the object must be held in an assigned
* transaction before calling dmu_buf_will_dirty(). You may use
* dmu_buf_set_user() on the bonus buffer as well. You must release
* what you hold with dmu_buf_rele().
*
* Returns ENOENT, EIO, or 0.
*/
int dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag,
dmu_buf_t **dbp);
int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
uint32_t flags);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
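/*
 * Illustrative usage sketch (assuming "tx" is assigned and holds the
 * object's bonus via dmu_tx_hold_bonus()). FTAG is the customary
 * hold tag:
 *
 *	dmu_buf_t *db;
 *
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		dmu_buf_will_dirty(db, tx);
 *		... update up to db->db_size bytes at db->db_data ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */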
/*
* Special spill buffer support used by "SA" framework
*/
int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
const void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp);
/*
* Obtain the DMU buffer from the specified object which contains the
* specified offset. dmu_buf_hold() puts a "hold" on the buffer, so
* that it will remain in memory. You must release the hold with
* dmu_buf_rele(). You must not access the dmu_buf_t after releasing
* what you hold. You must have a hold on any dmu_buf_t* you pass to the DMU.
*
* You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
* on the returned buffer before reading or writing the buffer's
* db_data. The comments for those routines describe what particular
* operations are valid after calling them.
*
* The object number must be a valid, allocated object number.
*/
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
const void *tag, dmu_buf_t **, int flags);
int dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, int read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp);
+int dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
+ const void *tag, dmu_buf_t **dbp);
int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
const void *tag, dmu_buf_t **dbp, int flags);
int dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
uint64_t length, boolean_t read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp, uint32_t flags);
+int dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset, const void *tag,
+ dmu_buf_t **dbp);
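/*
 * Illustrative usage sketch: dirtying the buffer that covers a byte
 * offset. Flags of 0 request the default DMU_READ_PREFETCH behavior;
 * "tx" must be assigned and hold the object:
 *
 *	dmu_buf_t *db;
 *
 *	if (dmu_buf_hold(os, object, offset, FTAG, &db, 0) == 0) {
 *		dmu_buf_will_dirty(db, tx);
 *		... modify db->db_data ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */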
/*
* Add a reference to a dmu buffer that has already been held via
* dmu_buf_hold() in the current context.
*/
void dmu_buf_add_ref(dmu_buf_t *db, const void *tag);
/*
* Attempt to add a reference to a dmu buffer that is in an unknown state,
* using a pointer that may have been invalidated by eviction processing.
* The request will succeed if the passed in dbuf still represents the
* same os/object/blkid, is ineligible for eviction, and has at least
* one hold by a user other than the syncer.
*/
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
uint64_t blkid, const void *tag);
void dmu_buf_rele(dmu_buf_t *db, const void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);
uint64_t dmu_buf_user_refcount(dmu_buf_t *db);
/*
* dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
* range of an object. A pointer to an array of dmu_buf_t*'s is
* returned (in *dbpp).
*
* dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
* frees the array. The hold on the array of buffers MUST be released
* with dmu_buf_rele_array. You can NOT release the hold on each buffer
* individually with dmu_buf_rele.
*/
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
uint64_t length, boolean_t read, const void *tag,
int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, const void *tag);
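/*
 * Illustrative usage sketch: holding every buffer backing a byte
 * range for read, then releasing the whole array at once:
 *
 *	dmu_buf_t **dbp;
 *	int numbufs, i;
 *
 *	if (dmu_buf_hold_array(os, object, offset, length, TRUE, FTAG,
 *	    &numbufs, &dbp) == 0) {
 *		for (i = 0; i < numbufs; i++)
 *			... consume dbp[i]->db_data ...
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */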
typedef void dmu_buf_evict_func_t(void *user_ptr);
/*
* A DMU buffer user object may be associated with a dbuf for the
* duration of its lifetime. This allows the user of a dbuf (client)
* to attach private data to a dbuf (e.g. in-core only data such as a
* dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
* when that dbuf has been evicted. Clients typically respond to the
* eviction notification by freeing their private data, thus ensuring
* the same lifetime for both dbuf and private data.
*
* The mapping from a dmu_buf_user_t to any client private data is the
* client's responsibility. All current consumers of the API with private
* data embed a dmu_buf_user_t as the first member of the structure for
* their private data. This allows conversions between the two types
* with a simple cast. Since the DMU buf user API never needs access
* to the private data, other strategies can be employed if necessary
* or convenient for the client (e.g. using container_of() to do the
* conversion for private data that cannot have the dmu_buf_user_t as
* its first member).
*
* Eviction callbacks are executed without the dbuf mutex held or any
* other type of mechanism to guarantee that the dbuf is still available.
* For this reason, users must assume the dbuf has already been freed
* and not reference the dbuf from the callback context.
*
* Users requesting "immediate eviction" are notified as soon as the dbuf
* is only referenced by dirty records (dirties == holds). Otherwise the
* notification occurs after eviction processing for the dbuf begins.
*/
typedef struct dmu_buf_user {
/*
* Asynchronous user eviction callback state.
*/
taskq_ent_t dbu_tqent;
/*
* This instance's eviction function pointers.
*
* dbu_evict_func_sync is called synchronously and then
* dbu_evict_func_async is executed asynchronously on a taskq.
*/
dmu_buf_evict_func_t *dbu_evict_func_sync;
dmu_buf_evict_func_t *dbu_evict_func_async;
#ifdef ZFS_DEBUG
/*
* Pointer to user's dbuf pointer. NULL for clients that do
* not associate a dbuf with their user data.
*
* The dbuf pointer is cleared upon eviction so as to catch
* use-after-evict bugs in clients.
*/
dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;
/*
* Initialize the given dmu_buf_user_t instance with the eviction function
* evict_func, to be called when the user is evicted.
*
* NOTE: This function should only be called once on a given dmu_buf_user_t.
* To allow enforcement of this, dbu must already be zeroed on entry.
*/
static inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
dmu_buf_evict_func_t *evict_func_async,
dmu_buf_t **clear_on_evict_dbufp __maybe_unused)
{
ASSERT(dbu->dbu_evict_func_sync == NULL);
ASSERT(dbu->dbu_evict_func_async == NULL);
/* must have at least one evict func */
IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
dbu->dbu_evict_func_sync = evict_func_sync;
dbu->dbu_evict_func_async = evict_func_async;
taskq_init_ent(&dbu->dbu_tqent);
#ifdef ZFS_DEBUG
dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
#endif
}
/*
* Attach user data to a dbuf and mark it for normal eviction processing
* (which occurs when the dbuf's data is cleared or its reference count
* goes to zero).
*
* Returns NULL on success, or the existing user if another user currently
* owns the buffer.
*/
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);
/*
* Attach user data to a dbuf and mark it for immediate eviction
* processing (which occurs as soon as its dirty and reference counts
* are equal).
*
* Returns NULL on success, or the existing user if another user currently
* owns the buffer.
*/
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);
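/*
 * Illustrative usage sketch: a client structure that embeds
 * dmu_buf_user_t as its first member and attaches itself to a held
 * dbuf. my_thing_t and my_thing_evict() are hypothetical names:
 *
 *	typedef struct my_thing {
 *		dmu_buf_user_t mt_dbu;
 *		dmu_buf_t *mt_db;
 *	} my_thing_t;
 *
 *	my_thing_t *mt = kmem_zalloc(sizeof (*mt), KM_SLEEP);
 *	mt->mt_db = db;
 *	dmu_buf_init_user(&mt->mt_dbu, my_thing_evict, NULL, &mt->mt_db);
 *	if (dmu_buf_set_user(db, &mt->mt_dbu) != NULL)
 *		... another user won the race; free mt and use theirs ...
 */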
/*
* Replace the current user of a dbuf.
*
* If given the current user of a dbuf, replaces the dbuf's user with
* "new_user" and returns the user data pointer that was replaced.
* Otherwise returns the current, and unmodified, dbuf user pointer.
*/
void *dmu_buf_replace_user(dmu_buf_t *db,
dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);
/*
* Remove the specified user data for a DMU buffer.
*
* Returns the user that was removed on success, or the current user if
* another user currently owns the buffer.
*/
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);
/*
* Returns the user data (dmu_buf_user_t *) associated with this dbuf.
*/
void *dmu_buf_get_user(dmu_buf_t *db);
objset_t *dmu_buf_get_objset(dmu_buf_t *db);
dnode_t *dmu_buf_dnode_enter(dmu_buf_t *db);
void dmu_buf_dnode_exit(dmu_buf_t *db);
/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);
/*
* Returns the blkptr associated with this dbuf, or NULL if not set.
*/
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);
/*
* Indicate that you are going to modify the buffer's data (db_data).
*
* The transaction (tx) must be assigned to a txg (ie. you've called
* dmu_tx_assign()). The buffer's object must be held in the tx
* (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
*/
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
boolean_t dmu_buf_is_dirty(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx);
/*
* You must create a transaction, then hold the objects which you will
* (or might) modify as part of this transaction. Then you must assign
* the transaction to a transaction group. Once the transaction has
* been assigned, you can modify buffers which belong to held objects as
* part of this transaction. You can't modify buffers before the
* transaction has been assigned; you can't modify buffers which don't
* belong to objects which this transaction holds; you can't hold
* objects once the transaction has been assigned. You may hold an
* object which you are going to free (with dmu_object_free()), but you
* don't have to.
*
* You can abort the transaction before it has been assigned.
*
* Note that you may hold buffers (with dmu_buf_hold) at any time,
* regardless of transaction state.
*/
#define DMU_NEW_OBJECT (-1ULL)
#define DMU_OBJECT_END (-1ULL)
dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
int len);
void dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
int len);
void dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
uint64_t len);
void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
uint64_t len);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
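/*
 * Illustrative usage sketch: the canonical create/hold/assign/modify/
 * commit cycle for a simple write. TXG_WAIT blocks until the tx can
 * be assigned rather than failing with ERESTART:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	int error;
 *
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */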
/*
* To register a commit callback, dmu_tx_callback_register() must be called.
*
* dcb_data is a pointer to caller private data that is passed on as a
* callback parameter. The caller is responsible for properly allocating and
* freeing it.
*
* When registering a callback, the transaction must be already created, but
* it cannot be committed or aborted. It can be assigned to a txg or not.
*
* The callback will be called after the transaction has been safely written
* to stable storage and will also be called if the dmu_tx is aborted.
* If there is any error which prevents the transaction from being committed to
* disk, the callback will be called with a value of error != 0.
*
* When multiple callbacks are registered to the transaction, they are
* called in reverse order of registration; this lets Lustre, currently
* the only user of commit callbacks, take the fast path of its commit
* callback handling.
*/
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);
void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
void *dcb_data);
void dmu_tx_do_callbacks(list_t *cb_list, int error);
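/*
 * Illustrative usage sketch: registering a commit callback on a
 * created (but not yet committed) transaction. my_commit_cb() and
 * its argument are hypothetical; the callback must tolerate
 * error != 0, which indicates the transaction never reached disk:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		... error == 0 means the txg is on stable storage ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 */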
/*
* Free up the data blocks for a defined range of a file. If size is
* -1, the range from offset to end-of-file is freed.
*/
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);
/*
* Convenience functions.
*
* Routines that can fail will return 0 on success, or an errno if there is a
* nonrecoverable I/O error.
*/
#define DMU_READ_PREFETCH 0 /* prefetch */
#define DMU_READ_NO_PREFETCH 1 /* don't prefetch */
#define DMU_READ_NO_DECRYPT 2 /* don't decrypt */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags);
int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
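/*
 * Illustrative usage sketch: a simple buffered read with prefetch.
 * Reads do not require a transaction:
 *
 *	char buf[512];
 *	int error;
 *
 *	error = dmu_read(os, object, offset, sizeof (buf), buf,
 *	    DMU_READ_PREFETCH);
 */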
#ifdef _KERNEL
int dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size);
int dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx);
#endif
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
extern uint_t zfs_max_recordsize;
/*
* Asynchronously try to read in the data.
*/
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, enum zio_priority pri);
typedef struct dmu_object_info {
/* All sizes are in bytes unless otherwise indicated. */
uint32_t doi_data_block_size;
uint32_t doi_metadata_block_size;
dmu_object_type_t doi_type;
dmu_object_type_t doi_bonus_type;
uint64_t doi_bonus_size;
uint8_t doi_indirection; /* 2 = dnode->indirect->data */
uint8_t doi_checksum;
uint8_t doi_compress;
uint8_t doi_nblkptr;
uint8_t doi_pad[4];
uint64_t doi_dnodesize;
uint64_t doi_physical_blocks_512; /* data + metadata, 512b blks */
uint64_t doi_max_offset;
uint64_t doi_fill_count; /* number of non-empty blocks */
} dmu_object_info_t;
typedef void (*const arc_byteswap_func_t)(void *buf, size_t size);
typedef struct dmu_object_type_info {
dmu_object_byteswap_t ot_byteswap;
boolean_t ot_metadata;
boolean_t ot_dbuf_metadata_cache;
boolean_t ot_encrypt;
const char *ot_name;
} dmu_object_type_info_t;
typedef const struct dmu_object_byteswap_info {
arc_byteswap_func_t ob_func;
const char *ob_name;
} dmu_object_byteswap_info_t;
extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];
/*
* Get information on a DMU object.
*
* Return 0 on success or ENOENT if object is not allocated.
*
* If doi is NULL, the call merely indicates whether the object exists.
*/
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
void __dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
* Like dmu_object_info_from_db, but faster still when you only care about
* the size.
*/
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
u_longlong_t *nblk512);
void dmu_object_dnsize_from_db(dmu_buf_t *db, int *dnsize);
typedef struct dmu_objset_stats {
uint64_t dds_num_clones; /* number of clones of this */
uint64_t dds_creation_txg;
uint64_t dds_guid;
dmu_objset_type_t dds_type;
uint8_t dds_is_snapshot;
uint8_t dds_inconsistent;
uint8_t dds_redacted;
char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;
/*
* Get stats on a dataset.
*/
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
/*
* Add entries to the nvlist for all the objset's properties. See
* zfs_prop_table[] and zfs(1m) for details on the properties.
*/
void dmu_objset_stats(objset_t *os, struct nvlist *nv);
/*
* Get the space usage statistics for statvfs().
*
* refdbytes is the amount of space "referenced" by this objset.
* availbytes is the amount of space available to this objset, taking
* into account quotas & reservations, assuming that no other objsets
* use the space first. These values correspond to the 'referenced' and
* 'available' properties, described in the zfs(1m) manpage.
*
* usedobjs and availobjs are the number of objects currently allocated,
* and available.
*/
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp);
/*
* The fsid_guid is a 56-bit ID that can change to avoid collisions.
* (Contrast with the ds_guid which is a 64-bit ID that will never
* change, so there is a small probability that it will collide.)
*/
uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
* Get the [cm]time for an objset's snapshot dir
*/
inode_timespec_t dmu_objset_snap_cmtime(objset_t *os);
int dmu_objset_is_snapshot(objset_t *os);
extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern uint64_t dmu_objset_dnodesize(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_objset_blksize(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *val);
extern int dmu_snapshot_realname(objset_t *os, const char *name, char *real,
int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp);
typedef struct zfs_file_info {
uint64_t zfi_user;
uint64_t zfi_group;
uint64_t zfi_project;
uint64_t zfi_generation;
} zfs_file_info_t;
typedef int file_info_cb_t(dmu_object_type_t bonustype, const void *data,
struct zfs_file_info *zoi);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
file_info_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);
/*
* Return the txg number for the given assigned transaction.
*/
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
/*
* Synchronous write.
* If a parent zio is provided, this function initiates a write on the
* provided buffer as a child of the parent zio.
* In the absence of a parent zio, the write is completed synchronously.
* At write completion, blk is filled with the bp of the written block.
* Note that while the data covered by this function will be on stable
* storage when the write completes, this new data does not become a
* permanent part of the file until the associated transaction commits.
*/
/*
* {zfs,zvol,ztest}_get_done() args
*/
typedef struct zgd {
struct lwb *zgd_lwb;
struct blkptr *zgd_bp;
dmu_buf_t *zgd_db;
struct zfs_locked_range *zgd_lr;
void *zgd_private;
} zgd_t;
typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);
/*
* Find the next hole or data block in the file starting at *off.
* Return the found offset in *off. Return ESRCH for end of file.
*/
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
uint64_t *off);
int dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, struct blkptr *bps, size_t *nbpsp);
int dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, dmu_tx_t *tx, const struct blkptr *bps, size_t nbps,
boolean_t replay);
/*
* Initial setup and final teardown.
*/
extern void dmu_init(void);
extern void dmu_fini(void);
typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
dmu_traverse_cb_t cb, void *arg);
int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
zfs_file_t *fp, offset_t *offp);
/* CRC64 table */
#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];
extern uint_t dmu_prefetch_max;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DMU_H */
diff --git a/sys/contrib/openzfs/include/sys/dmu_impl.h b/sys/contrib/openzfs/include/sys/dmu_impl.h
index ce6ae3c665ac..83ae2b76ba1f 100644
--- a/sys/contrib/openzfs/include/sys/dmu_impl.h
+++ b/sys/contrib/openzfs/include/sys/dmu_impl.h
@@ -1,257 +1,255 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
*/
#ifndef _SYS_DMU_IMPL_H
#define _SYS_DMU_IMPL_H
#include <sys/txg_impl.h>
#include <sys/zio.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* This is the locking strategy for the DMU. Numbers in parentheses are
* cases that use that lock order, referenced below:
*
* ARC is self-contained
* bplist is self-contained
* refcount is self-contained
* txg is self-contained (hopefully!)
* zst_lock
* zf_rwlock
*
* XXX try to improve evicting path?
*
* dp_config_rwlock > os_obj_lock > dn_struct_rwlock >
* dn_dbufs_mtx > hash_mutexes > db_mtx > dd_lock > leafs
*
* dp_config_rwlock
* must be held before: everything
* protects dd namespace changes
* protects property changes globally
* held from:
* dsl_dir_open/r:
* dsl_dir_create_sync/w:
* dsl_dir_sync_destroy/w:
* dsl_dir_rename_sync/w:
* dsl_prop_changed_notify/r:
*
* os_obj_lock
* must be held before:
* everything except dp_config_rwlock
* protects os_obj_next
* held from:
* dmu_object_alloc: dn_dbufs_mtx, db_mtx, hash_mutexes, dn_struct_rwlock
*
* dn_struct_rwlock
* must be held before:
* everything except dp_config_rwlock and os_obj_lock
* protects structure of dnode (eg. nlevels)
* db_blkptr can change when syncing out change to nlevels
* dn_maxblkid
* dn_nlevels
* dn_*blksz*
* phys nlevels, maxblkid, physical blkptr_t's (?)
* held from:
* callers of dbuf_read_impl, dbuf_hold[_impl], dbuf_prefetch
* dmu_object_info_from_dnode: dn_dirty_mtx (dn_datablksz)
* dbuf_read_impl: db_mtx, dmu_zfetch()
* dmu_zfetch: zf_rwlock/r, zst_lock, dbuf_prefetch()
* dbuf_new_size: db_mtx
* dbuf_dirty: db_mtx
* dbuf_findbp: (callers, phys? - the real need)
* dbuf_create: dn_dbufs_mtx, hash_mutexes, db_mtx (phys?)
* dbuf_prefetch: dn_dirty_mtx, hash_mutexes, db_mtx, dn_dbufs_mtx
* dbuf_hold_impl: hash_mutexes, db_mtx, dn_dbufs_mtx, dbuf_findbp()
* dnode_sync/w (increase_indirection): db_mtx (phys)
* dnode_set_blksz/w: dn_dbufs_mtx (dn_*blksz*)
* dnode_new_blkid/w: (dn_maxblkid)
* dnode_free_range/w: dn_dirty_mtx (dn_maxblkid)
* dnode_next_offset: (phys)
*
* dn_dbufs_mtx
* must be held before:
* db_mtx, hash_mutexes
* protects:
* dn_dbufs
* dn_evicted
* held from:
* dmu_evict_user: db_mtx (dn_dbufs)
* dbuf_free_range: db_mtx (dn_dbufs)
* dbuf_remove_ref: db_mtx, callees:
* dbuf_hash_remove: hash_mutexes, db_mtx
* dbuf_create: hash_mutexes, db_mtx (dn_dbufs)
* dnode_set_blksz: (dn_dbufs)
*
* hash_mutexes (global)
* must be held before:
* db_mtx
* protects dbuf_hash_table (global) and db_hash_next
* held from:
* dbuf_find: db_mtx
* dbuf_hash_insert: db_mtx
* dbuf_hash_remove: db_mtx
*
* db_mtx (meta-leaf)
* must be held before:
* dn_mtx, dn_dirty_mtx, dd_lock (leaf mutexes)
* protects:
* db_state
* db_holds
* db_buf
* db_changed
* db_data_pending
* db_dirtied
* db_link
* db_dirty_node (??)
* db_dirtycnt
* db_d.*
* db.*
* held from:
* dbuf_dirty: dn_mtx, dn_dirty_mtx
* dbuf_dirty->dsl_dir_willuse_space: dd_lock
* dbuf_dirty->dbuf_new_block->dsl_dataset_block_freeable: dd_lock
* dbuf_undirty: dn_dirty_mtx (db_d)
* dbuf_write_done: dn_dirty_mtx (db_state)
* dbuf_*
* dmu_buf_update_user: none (db_d)
* dmu_evict_user: none (db_d) (maybe can eliminate)
* dbuf_find: none (db_holds)
* dbuf_hash_insert: none (db_holds)
* dmu_buf_read_array_impl: none (db_state, db_changed)
* dmu_sync: none (db_dirty_node, db_d)
* dnode_reallocate: none (db)
*
* dn_mtx (leaf)
* protects:
* dn_dirty_dbufs
* dn_ranges
* phys accounting
* dn_allocated_txg
* dn_free_txg
* dn_assigned_txg
* dn_dirty_txg
* dd_assigned_tx
* dn_notxholds
* dn_nodnholds
* dn_dirtyctx
* dn_dirtyctx_firstset
* (dn_phys copy fields?)
* (dn_phys contents?)
* held from:
* dnode_*
* dbuf_dirty: none
* dbuf_sync: none (phys accounting)
* dbuf_undirty: none (dn_ranges, dn_dirty_dbufs)
* dbuf_write_done: none (phys accounting)
* dmu_object_info_from_dnode: none (accounting)
* dmu_tx_commit: none
* dmu_tx_hold_object_impl: none
* dmu_tx_try_assign: dn_notxholds(cv)
* dmu_tx_unassign: none
*
* dd_lock
* must be held before:
* ds_lock
* ancestors' dd_lock
* protects:
* dd_prop_cbs
* dd_sync_*
* dd_used_bytes
* dd_tempreserved
* dd_space_towrite
* dd_myname
* dd_phys accounting?
* held from:
* dsl_dir_*
* dsl_prop_changed_notify: none (dd_prop_cbs)
* dsl_prop_register: none (dd_prop_cbs)
* dsl_prop_unregister: none (dd_prop_cbs)
*
* os_lock (leaf)
* protects:
* os_dirty_dnodes
* os_free_dnodes
* os_dnodes
* os_downgraded_dbufs
* dn_dirtyblksz
* dn_dirty_link
* held from:
* dnode_create: none (os_dnodes)
* dnode_destroy: none (os_dnodes)
* dnode_setdirty: none (dn_dirtyblksz, os_*_dnodes)
* dnode_free: none (dn_dirtyblksz, os_*_dnodes)
*
* ds_lock
* protects:
* ds_objset
* ds_open_refcount
* ds_snapname
* ds_phys accounting
* ds_phys userrefs zapobj
* ds_reserved
* held from:
* dsl_dataset_*
*
* dr_mtx (leaf)
* protects:
* dr_children
* held from:
* dbuf_dirty
* dbuf_undirty
* dbuf_sync_indirect
* dnode_new_blkid
*/
struct objset;
struct dmu_pool;
typedef struct dmu_sendstatus {
list_node_t dss_link;
int dss_outfd;
proc_t *dss_proc;
offset_t *dss_off;
uint64_t dss_blocks; /* blocks visited during the sending process */
} dmu_sendstatus_t;
void dmu_object_zapify(objset_t *, uint64_t, dmu_object_type_t, dmu_tx_t *);
void dmu_object_free_zapified(objset_t *, uint64_t, dmu_tx_t *);
-int dmu_buf_hold_noread(objset_t *, uint64_t, uint64_t,
- const void *, dmu_buf_t **);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DMU_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/dmu_zfetch.h b/sys/contrib/openzfs/include/sys/dmu_zfetch.h
index 0fbc3bacffb9..f00e13cf03a6 100644
--- a/sys/contrib/openzfs/include/sys/dmu_zfetch.h
+++ b/sys/contrib/openzfs/include/sys/dmu_zfetch.h
@@ -1,88 +1,86 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2014, 2017 by Delphix. All rights reserved.
*/
#ifndef _DMU_ZFETCH_H
#define _DMU_ZFETCH_H
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
-extern uint64_t zfetch_array_rd_sz;
-
struct dnode; /* so we can reference dnode */
typedef struct zfetch {
kmutex_t zf_lock; /* protects zfetch structure */
list_t zf_stream; /* list of zstream_t's */
struct dnode *zf_dnode; /* dnode that owns this zfetch */
int zf_numstreams; /* number of zstream_t's */
} zfetch_t;
typedef struct zstream {
uint64_t zs_blkid; /* expect next access at this blkid */
unsigned int zs_pf_dist; /* data prefetch distance in bytes */
unsigned int zs_ipf_dist; /* L1 prefetch distance in bytes */
uint64_t zs_pf_start; /* first data block to prefetch */
uint64_t zs_pf_end; /* data block to prefetch up to */
uint64_t zs_ipf_start; /* first data block to prefetch L1 */
uint64_t zs_ipf_end; /* data block to prefetch L1 up to */
list_node_t zs_node; /* link for zf_stream */
hrtime_t zs_atime; /* time last prefetch issued */
zfetch_t *zs_fetch; /* parent fetch */
boolean_t zs_missed; /* stream saw cache misses */
boolean_t zs_more; /* need more distant prefetch */
zfs_refcount_t zs_callers; /* number of pending callers */
/*
* Number of stream references: dnode, callers and pending blocks.
* The stream memory is freed when the number returns to zero.
*/
zfs_refcount_t zs_refs;
} zstream_t;
void zfetch_init(void);
void zfetch_fini(void);
void dmu_zfetch_init(zfetch_t *, struct dnode *);
void dmu_zfetch_fini(zfetch_t *);
zstream_t *dmu_zfetch_prepare(zfetch_t *, uint64_t, uint64_t, boolean_t,
boolean_t);
void dmu_zfetch_run(zstream_t *, boolean_t, boolean_t);
void dmu_zfetch(zfetch_t *, uint64_t, uint64_t, boolean_t, boolean_t,
boolean_t);
#ifdef __cplusplus
}
#endif
#endif /* _DMU_ZFETCH_H */
diff --git a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
index b9bac7e252e5..fb9e8649221e 100644
--- a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
@@ -1,139 +1,135 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2020 by Delphix. All rights reserved.
*/
#ifndef _SYS_FM_FS_ZFS_H
#define _SYS_FM_FS_ZFS_H
#ifdef __cplusplus
extern "C" {
#endif
#define ZFS_ERROR_CLASS "fs.zfs"
#define FM_EREPORT_ZFS_CHECKSUM "checksum"
#define FM_EREPORT_ZFS_AUTHENTICATION "authentication"
#define FM_EREPORT_ZFS_IO "io"
#define FM_EREPORT_ZFS_DATA "data"
#define FM_EREPORT_ZFS_DELAY "delay"
#define FM_EREPORT_ZFS_DEADMAN "deadman"
#define FM_EREPORT_ZFS_POOL "zpool"
#define FM_EREPORT_ZFS_DEVICE_UNKNOWN "vdev.unknown"
#define FM_EREPORT_ZFS_DEVICE_OPEN_FAILED "vdev.open_failed"
#define FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA "vdev.corrupt_data"
#define FM_EREPORT_ZFS_DEVICE_NO_REPLICAS "vdev.no_replicas"
#define FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM "vdev.bad_guid_sum"
#define FM_EREPORT_ZFS_DEVICE_TOO_SMALL "vdev.too_small"
#define FM_EREPORT_ZFS_DEVICE_BAD_LABEL "vdev.bad_label"
#define FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT "vdev.bad_ashift"
#define FM_EREPORT_ZFS_IO_FAILURE "io_failure"
#define FM_EREPORT_ZFS_PROBE_FAILURE "probe_failure"
#define FM_EREPORT_ZFS_LOG_REPLAY "log_replay"
#define FM_EREPORT_ZFS_CONFIG_CACHE_WRITE "config_cache_write"
#define FM_EREPORT_PAYLOAD_ZFS_POOL "pool"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE "pool_failmode"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_GUID "pool_guid"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT "pool_context"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_STATE "pool_state"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID "vdev_guid"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE "vdev_type"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH "vdev_path"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH "vdev_physpath"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH "vdev_enc_sysfs_path"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID "vdev_devid"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU "vdev_fru"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE "vdev_state"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE "vdev_laststate"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT "vdev_ashift"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS "vdev_complete_ts"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS "vdev_delta_ts"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS "vdev_spare_paths"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS "vdev_spare_guids"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS "vdev_read_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS "vdev_write_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS "vdev_cksum_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_N "vdev_cksum_n"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_T "vdev_cksum_t"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_N "vdev_io_n"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_T "vdev_io_t"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS "vdev_delays"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID "parent_guid"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE "parent_type"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH "parent_path"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID "parent_devid"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET "zio_objset"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT "zio_object"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL "zio_level"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID "zio_blkid"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR "zio_err"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET "zio_offset"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE "zio_size"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS "zio_flags"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE "zio_stage"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY "zio_priority"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE "zio_pipeline"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY "zio_delay"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP "zio_timestamp"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA "zio_delta"
#define FM_EREPORT_PAYLOAD_ZFS_PREV_STATE "prev_state"
-#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED "cksum_expected"
-#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL "cksum_actual"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO "cksum_algorithm"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP "cksum_byteswap"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES "bad_ranges"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP "bad_ranges_min_gap"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS "bad_range_sets"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS "bad_range_clears"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS "bad_set_bits"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS "bad_cleared_bits"
-#define FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM "bad_set_histogram"
-#define FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM "bad_cleared_histogram"
#define FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME "snapshot_name"
#define FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME "device_name"
#define FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME "raw_name"
#define FM_EREPORT_PAYLOAD_ZFS_VOLUME "volume"
#define FM_EREPORT_FAILMODE_WAIT "wait"
#define FM_EREPORT_FAILMODE_CONTINUE "continue"
#define FM_EREPORT_FAILMODE_PANIC "panic"
#define FM_RESOURCE_REMOVED "removed"
#define FM_RESOURCE_AUTOREPLACE "autoreplace"
#define FM_RESOURCE_STATECHANGE "statechange"
#define FM_RESOURCE_ZFS_SNAPSHOT_MOUNT "snapshot_mount"
#define FM_RESOURCE_ZFS_SNAPSHOT_UNMOUNT "snapshot_unmount"
#define FM_RESOURCE_ZVOL_CREATE_SYMLINK "zvol_create"
#define FM_RESOURCE_ZVOL_REMOVE_SYMLINK "zvol_remove"
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FM_FS_ZFS_H */
diff --git a/sys/contrib/openzfs/include/sys/metaslab.h b/sys/contrib/openzfs/include/sys/metaslab.h
index fec080139a2b..0df6e5f81fc1 100644
--- a/sys/contrib/openzfs/include/sys/metaslab.h
+++ b/sys/contrib/openzfs/include/sys/metaslab.h
@@ -1,151 +1,148 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#ifndef _SYS_METASLAB_H
#define _SYS_METASLAB_H
#include <sys/spa.h>
#include <sys/space_map.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/avl.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct metaslab_ops {
uint64_t (*msop_alloc)(metaslab_t *, uint64_t);
} metaslab_ops_t;
extern const metaslab_ops_t zfs_metaslab_ops;
int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t,
metaslab_t **);
void metaslab_fini(metaslab_t *);
void metaslab_set_unflushed_dirty(metaslab_t *, boolean_t);
void metaslab_set_unflushed_txg(metaslab_t *, uint64_t, dmu_tx_t *);
void metaslab_set_estimated_condensed_size(metaslab_t *, uint64_t, dmu_tx_t *);
boolean_t metaslab_unflushed_dirty(metaslab_t *);
uint64_t metaslab_unflushed_txg(metaslab_t *);
uint64_t metaslab_estimated_condensed_size(metaslab_t *);
int metaslab_sort_by_flushed(const void *, const void *);
void metaslab_unflushed_bump(metaslab_t *, dmu_tx_t *, boolean_t);
uint64_t metaslab_unflushed_changes_memused(metaslab_t *);
int metaslab_load(metaslab_t *);
void metaslab_unload(metaslab_t *);
boolean_t metaslab_flush(metaslab_t *, dmu_tx_t *);
uint64_t metaslab_allocated_space(metaslab_t *);
void metaslab_sync(metaslab_t *, uint64_t);
void metaslab_sync_done(metaslab_t *, uint64_t);
void metaslab_sync_reassess(metaslab_group_t *);
uint64_t metaslab_largest_allocatable(metaslab_t *);
/*
* metaslab alloc flags
*/
#define METASLAB_HINTBP_FAVOR 0x0
#define METASLAB_HINTBP_AVOID 0x1
#define METASLAB_GANG_HEADER 0x2
#define METASLAB_GANG_CHILD 0x4
#define METASLAB_ASYNC_ALLOC 0x8
#define METASLAB_DONT_THROTTLE 0x10
#define METASLAB_MUST_RESERVE 0x20
-#define METASLAB_FASTWRITE 0x40
#define METASLAB_ZIL 0x80
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *,
int);
int metaslab_alloc_dva(spa_t *, metaslab_class_t *, uint64_t,
dva_t *, int, dva_t *, uint64_t, int, zio_alloc_list_t *, int);
void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t);
void metaslab_free_concrete(vdev_t *, uint64_t, uint64_t, boolean_t);
void metaslab_free_dva(spa_t *, const dva_t *, boolean_t);
void metaslab_free_impl_cb(uint64_t, vdev_t *, uint64_t, uint64_t, void *);
void metaslab_unalloc_dva(spa_t *, const dva_t *, uint64_t);
int metaslab_claim(spa_t *, const blkptr_t *, uint64_t);
int metaslab_claim_impl(vdev_t *, uint64_t, uint64_t, uint64_t);
void metaslab_check_free(spa_t *, const blkptr_t *);
-void metaslab_fastwrite_mark(spa_t *, const blkptr_t *);
-void metaslab_fastwrite_unmark(spa_t *, const blkptr_t *);
void metaslab_stat_init(void);
void metaslab_stat_fini(void);
void metaslab_trace_init(zio_alloc_list_t *);
void metaslab_trace_fini(zio_alloc_list_t *);
metaslab_class_t *metaslab_class_create(spa_t *, const metaslab_ops_t *);
void metaslab_class_destroy(metaslab_class_t *);
int metaslab_class_validate(metaslab_class_t *);
void metaslab_class_histogram_verify(metaslab_class_t *);
uint64_t metaslab_class_fragmentation(metaslab_class_t *);
uint64_t metaslab_class_expandable_space(metaslab_class_t *);
boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, int,
zio_t *, int);
void metaslab_class_throttle_unreserve(metaslab_class_t *, int, int, zio_t *);
void metaslab_class_evict_old(metaslab_class_t *, uint64_t);
uint64_t metaslab_class_get_alloc(metaslab_class_t *);
uint64_t metaslab_class_get_space(metaslab_class_t *);
uint64_t metaslab_class_get_dspace(metaslab_class_t *);
uint64_t metaslab_class_get_deferred(metaslab_class_t *);
void metaslab_space_update(vdev_t *, metaslab_class_t *,
int64_t, int64_t, int64_t);
metaslab_group_t *metaslab_group_create(metaslab_class_t *, vdev_t *, int);
void metaslab_group_destroy(metaslab_group_t *);
void metaslab_group_activate(metaslab_group_t *);
void metaslab_group_passivate(metaslab_group_t *);
boolean_t metaslab_group_initialized(metaslab_group_t *);
uint64_t metaslab_group_get_space(metaslab_group_t *);
void metaslab_group_histogram_verify(metaslab_group_t *);
uint64_t metaslab_group_fragmentation(metaslab_group_t *);
void metaslab_group_histogram_remove(metaslab_group_t *, metaslab_t *);
void metaslab_group_alloc_decrement(spa_t *, uint64_t, const void *, int, int,
boolean_t);
void metaslab_group_alloc_verify(spa_t *, const blkptr_t *, const void *, int);
void metaslab_recalculate_weight_and_sort(metaslab_t *);
void metaslab_disable(metaslab_t *);
void metaslab_enable(metaslab_t *, boolean_t, boolean_t);
void metaslab_set_selected_txg(metaslab_t *, uint64_t);
extern int metaslab_debug_load;
range_seg_type_t metaslab_calculate_range_tree_type(vdev_t *vdev,
metaslab_t *msp, uint64_t *start, uint64_t *shift);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_METASLAB_H */
diff --git a/sys/contrib/openzfs/include/sys/metaslab_impl.h b/sys/contrib/openzfs/include/sys/metaslab_impl.h
index 5beb1b737775..d328068890cc 100644
--- a/sys/contrib/openzfs/include/sys/metaslab_impl.h
+++ b/sys/contrib/openzfs/include/sys/metaslab_impl.h
@@ -1,573 +1,573 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_METASLAB_IMPL_H
#define _SYS_METASLAB_IMPL_H
#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Metaslab allocation tracing record.
*/
typedef struct metaslab_alloc_trace {
list_node_t mat_list_node;
metaslab_group_t *mat_mg;
metaslab_t *mat_msp;
uint64_t mat_size;
uint64_t mat_weight;
uint32_t mat_dva_id;
uint64_t mat_offset;
int mat_allocator;
} metaslab_alloc_trace_t;
/*
* Used by the metaslab allocation tracing facility to indicate
* error conditions. These errors are stored to the offset member
* of the metaslab_alloc_trace_t record and displayed by mdb.
*/
typedef enum trace_alloc_type {
TRACE_ALLOC_FAILURE = -1ULL,
TRACE_TOO_SMALL = -2ULL,
TRACE_FORCE_GANG = -3ULL,
TRACE_NOT_ALLOCATABLE = -4ULL,
TRACE_GROUP_FAILURE = -5ULL,
TRACE_ENOSPC = -6ULL,
TRACE_CONDENSING = -7ULL,
TRACE_VDEV_ERROR = -8ULL,
TRACE_DISABLED = -9ULL,
} trace_alloc_type_t;
#define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
#define METASLAB_WEIGHT_CLAIM (1ULL << 61)
#define METASLAB_WEIGHT_TYPE (1ULL << 60)
#define METASLAB_ACTIVE_MASK \
(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
METASLAB_WEIGHT_CLAIM)
/*
* The metaslab weight is used to encode the amount of free space in a
* metaslab, such that the "best" metaslab appears first when sorting the
* metaslabs by weight. The weight (and therefore the "best" metaslab) can
* be determined in two different ways: by computing a weighted sum of all
* the free space in the metaslab (a space based weight) or by counting only
* the free segments of the largest size (a segment based weight). We prefer
* the segment based weight because it reflects how the free space is
* composed, but we cannot always use it -- legacy pools do not have the
* space map histogram information necessary to determine the largest
* contiguous regions. Pools that have the space map histogram determine
* the segment weight by looking at each bucket in the histogram and
* determining the free space whose size in bytes is in the range:
* [2^i, 2^(i+1))
* We then encode the largest index, i, that contains regions into the
* segment-weighted value.
*
* Space-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PSC1| weighted-free space |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* C - indicates activation for claimed block zio
* space - the fragmentation-weighted space
*
* Segment-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PSC0| idx| count of segments in region |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* C - indicates activation for claimed block zio
* idx - index for the highest bucket in the histogram
* count - number of segments in the specified bucket
*/
#define WEIGHT_GET_ACTIVE(weight) BF64_GET((weight), 61, 3)
#define WEIGHT_SET_ACTIVE(weight, x) BF64_SET((weight), 61, 3, x)
#define WEIGHT_IS_SPACEBASED(weight) \
((weight) == 0 || BF64_GET((weight), 60, 1))
#define WEIGHT_SET_SPACEBASED(weight) BF64_SET((weight), 60, 1, 1)
/*
* These macros are only applicable to segment-based weighting.
*/
#define WEIGHT_GET_INDEX(weight) BF64_GET((weight), 54, 6)
#define WEIGHT_SET_INDEX(weight, x) BF64_SET((weight), 54, 6, x)
#define WEIGHT_GET_COUNT(weight) BF64_GET((weight), 0, 54)
#define WEIGHT_SET_COUNT(weight, x) BF64_SET((weight), 0, 54, x)
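/*
 * Illustrative sketch: composing and decomposing a segment-based
 * weight describing 1000 free segments in the [2^17, 2^18) bucket:
 *
 *	uint64_t weight = 0;
 *
 *	WEIGHT_SET_INDEX(weight, 17);
 *	WEIGHT_SET_COUNT(weight, 1000);
 *	ASSERT(!WEIGHT_IS_SPACEBASED(weight));
 *	ASSERT3U(WEIGHT_GET_INDEX(weight), ==, 17);
 */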
/*
* Per-allocator data structure.
*/
typedef struct metaslab_class_allocator {
metaslab_group_t *mca_rotor;
uint64_t mca_aliquot;
/*
* The allocation throttle works on a reservation system. Whenever
* an asynchronous zio wants to perform an allocation it must
* first reserve the number of blocks that it wants to allocate.
* If there aren't sufficient slots available for the pending zio
* then that I/O is throttled until more slots free up. The current
* number of reserved allocations is maintained by the mca_alloc_slots
* refcount. The mca_alloc_max_slots value determines the maximum
* number of allocations that the system allows. Gang blocks are
* allowed to reserve slots even if we've reached the maximum
* number of allocations allowed.
*/
uint64_t mca_alloc_max_slots;
zfs_refcount_t mca_alloc_slots;
} ____cacheline_aligned metaslab_class_allocator_t;
/*
* A metaslab class encompasses a category of allocatable top-level vdevs.
* Each top-level vdev is associated with a metaslab group which defines
* the allocatable region for that vdev. Examples of these categories include
* "normal" for data block allocations (i.e. main pool allocations) or "log"
* for allocations designated for intent log devices (i.e. slog devices).
* When a block allocation is requested from the SPA it is associated with a
* metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
* to the class can be used to satisfy that request. Allocations are done
* by traversing the metaslab groups that are linked off of the mca_rotor field.
* This rotor points to the next metaslab group where allocations will be
* attempted. Allocating a block is a 3 step process -- select the metaslab
* group, select the metaslab, and then allocate the block. The metaslab
* class defines the low-level block allocator that will be used as the
* final step in allocation. These allocators are pluggable allowing each class
* to use a block allocator that best suits that class.
*/
struct metaslab_class {
kmutex_t mc_lock;
spa_t *mc_spa;
const metaslab_ops_t *mc_ops;
/*
* Track the number of metaslab groups that have been initialized
* and can accept allocations. An initialized metaslab group is
* one that has been completely added to the config (i.e. we have
* updated the MOS config and the space has been added to the pool).
*/
uint64_t mc_groups;
/*
* Toggle to enable/disable the allocation throttle.
*/
boolean_t mc_alloc_throttle_enabled;
uint64_t mc_alloc_groups; /* # of allocatable groups */
uint64_t mc_alloc; /* total allocated space */
uint64_t mc_deferred; /* total deferred frees */
uint64_t mc_space; /* total space (alloc + free) */
uint64_t mc_dspace; /* total deflated space */
uint64_t mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
/*
* List of all loaded metaslabs in the class, sorted in order of most
* recent use.
*/
multilist_t mc_metaslab_txg_list;
metaslab_class_allocator_t mc_allocator[];
};
/*
* Per-allocator data structure.
*/
typedef struct metaslab_group_allocator {
uint64_t mga_cur_max_alloc_queue_depth;
zfs_refcount_t mga_alloc_queue_depth;
metaslab_t *mga_primary;
metaslab_t *mga_secondary;
} metaslab_group_allocator_t;
/*
* Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
* of a top-level vdev. They are linked together to form a circular linked
* list and can belong to only one metaslab class. Metaslab groups may become
* ineligible for allocations for a number of reasons such as limited free
* space, fragmentation, or going offline. When this happens the allocator will
* simply find the next metaslab group in the linked list and attempt
* to allocate from that group instead.
*/
struct metaslab_group {
kmutex_t mg_lock;
avl_tree_t mg_metaslab_tree;
uint64_t mg_aliquot;
boolean_t mg_allocatable; /* can we allocate? */
uint64_t mg_ms_ready;
/*
* A metaslab group is considered to be initialized only after
* we have updated the MOS config and added the space to the pool.
* We only allow allocation attempts to a metaslab group if it
* has been initialized.
*/
boolean_t mg_initialized;
uint64_t mg_free_capacity; /* percentage free */
int64_t mg_bias;
int64_t mg_activation_count;
metaslab_class_t *mg_class;
vdev_t *mg_vd;
taskq_t *mg_taskq;
metaslab_group_t *mg_prev;
metaslab_group_t *mg_next;
/*
* In order for the allocation throttle to function properly, we cannot
* have too many IOs going to each disk by default; the throttle
* operates by allocating more work to disks that finish quickly, so
* allocating larger chunks to each disk reduces its effectiveness.
* However, if the number of IOs going to each allocator is too small,
* we will not perform proper aggregation at the vdev_queue layer,
* also resulting in decreased performance. Therefore, we will use a
* ramp-up strategy.
*
* Each allocator in each metaslab group has a current queue depth
* (mg_alloc_queue_depth[allocator]) and a current max queue depth
* (mga_cur_max_alloc_queue_depth[allocator]), and each metaslab group
* has an absolute max queue depth (mg_max_alloc_queue_depth). We
* add IOs to an allocator until the mg_alloc_queue_depth for that
* allocator hits the cur_max. Every time an IO completes for a given
* allocator on a given metaslab group, we increment its cur_max until
* it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
* help protect against disks that decrease in performance over time.
*
* It's possible for an allocator to handle more allocations than
* its max. This can occur when gang blocks are required or when other
* groups are unable to handle their share of allocations.
*/
uint64_t mg_max_alloc_queue_depth;
/*
* A metaslab group that can no longer allocate the minimum block
* size will set mg_no_free_space. Once a metaslab group is out
* of space then its share of work must be distributed to other
* groups.
*/
boolean_t mg_no_free_space;
uint64_t mg_allocations;
uint64_t mg_failed_allocations;
uint64_t mg_fragmentation;
uint64_t mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
int mg_ms_disabled;
boolean_t mg_disabled_updating;
kmutex_t mg_ms_disabled_lock;
kcondvar_t mg_ms_disabled_cv;
int mg_allocators;
metaslab_group_allocator_t mg_allocator[];
};
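/*
 * Illustration (not part of the original header): the ramp-up strategy
 * from the comment above, roughly as it might run when an allocation
 * I/O completes. mga_io_done_sketch() is a hypothetical name.
 */
static inline void
mga_io_done_sketch(metaslab_group_t *mg, int allocator)
{
	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];

	/*
	 * Each completed I/O grows this allocator's current max toward
	 * the group-wide absolute max; the cur_max is reset every txg
	 * elsewhere to guard against disks that slow down over time.
	 */
	if (mga->mga_cur_max_alloc_queue_depth < mg->mg_max_alloc_queue_depth)
		mga->mga_cur_max_alloc_queue_depth++;
}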
/*
* This value defines the number of elements in the ms_lbas array. The value
* of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
* This is the equivalent of highbit(UINT64_MAX).
*/
#define MAX_LBAS 64
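/*
 * Illustrative compile-time check (not in the original header),
 * assuming <limits.h> is available here: 64 is exactly the number of
 * bits in a uint64_t, so every power-of-2 bucket up to UINT64_MAX has
 * a slot. For example, a 1 MiB (2^20 byte) segment maps to bucket
 * highbit64(1 << 20) - 1 == 20.
 */
_Static_assert(MAX_LBAS == sizeof (uint64_t) * CHAR_BIT,
	"MAX_LBAS must cover every power-of-2 bucket of a 64-bit size");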
/*
* Each metaslab maintains a set of in-core trees to track metaslab
* operations. The in-core free tree (ms_allocatable) contains the list of
* free segments which are eligible for allocation. As blocks are
- * allocated, the allocated segment are removed from the ms_allocatable and
+ * allocated, the allocated segments are removed from the ms_allocatable and
* added to a per txg allocation tree (ms_allocating). As blocks are
* freed, they are added to the free tree (ms_freeing). These trees
* allow us to process all allocations and frees in syncing context
* where it is safe to update the on-disk space maps. An additional set
* of in-core trees is maintained to track deferred frees
* (ms_defer). Once a block is freed it will move from the
* ms_freed to the ms_defer tree. A deferred free means that a block
* has been freed but cannot be used by the pool until TXG_DEFER_SIZE
* transaction groups later. For example, a block that is freed in txg
* 50 will not be available for reallocation until txg 52 (50 +
* TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
* A pool can be safely rolled back TXG_DEFER_SIZE transaction
* groups while ensuring that no block has been reallocated.
*
* The simplified transition diagram looks like this:
*
*
* ALLOCATE
* |
* V
* free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map)
* ^
* | ms_freeing <--- FREE
* | |
* | v
* | ms_freed
* | |
* +-------- ms_defer[2] <-------+-------> (write to space map)
*
*
* Each metaslab's space is tracked in a single space map in the MOS,
* which is only updated in syncing context. Each time we sync a txg,
* we append the allocs and frees from that txg to the space map. The
* pool space is only updated once all metaslabs have finished syncing.
*
* To load the in-core free tree we read the space map from disk. This
* object contains a series of alloc and free records that are combined
* to make up the list of all free segments in this metaslab. These
* segments are represented in-core by the ms_allocatable and are stored
* in an AVL tree.
*
* As the space map grows (as a result of the appends) it will
* eventually become space-inefficient. When the metaslab's in-core
* free tree is zfs_condense_pct/100 times the size of the minimal
* on-disk representation, we rewrite it in its minimized form. If a
* metaslab needs to condense then we must set the ms_condensing flag to
* ensure that allocations are not performed on the metaslab that is
* being written.
*/
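/*
 * Illustration (not part of the original header): the ms_freed ->
 * ms_defer rotation described above, roughly as it could be expressed
 * at the end of a sync. This is a sketch, not the actual
 * metaslab_sync_done() code; it ignores locking and the condensing and
 * flushing interactions, and it is shown here, before struct metaslab
 * is declared, purely for exposition.
 */
static inline void
ms_defer_rotate_sketch(metaslab_t *msp, uint64_t txg)
{
	range_tree_t **defer = &msp->ms_defer[txg % TXG_DEFER_SIZE];

	/*
	 * The tree in this slot holds frees from TXG_DEFER_SIZE txgs
	 * ago; those segments become allocatable again now.
	 */
	range_tree_walk(*defer, range_tree_add, msp->ms_allocatable);
	range_tree_vacate(*defer, NULL, NULL);

	/* This txg's frees start their TXG_DEFER_SIZE-txg quarantine. */
	range_tree_swap(&msp->ms_freed, defer);
}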
struct metaslab {
/*
* This is the main lock of the metaslab and its purpose is to
- * coordinate our allocations and frees [e.g metaslab_block_alloc(),
+ * coordinate our allocations and frees [e.g., metaslab_block_alloc(),
* metaslab_free_concrete(), ..etc] with our various syncing
- * procedures [e.g. metaslab_sync(), metaslab_sync_done(), ..etc].
+ * procedures [e.g., metaslab_sync(), metaslab_sync_done(), ..etc].
*
* The lock is also used during some miscellaneous operations like
* using the metaslab's histogram for the metaslab group's histogram
* aggregation, or marking the metaslab for initialization.
*/
kmutex_t ms_lock;
/*
* Acquired together with the ms_lock whenever we expect to
* write to metaslab data on-disk (i.e. flushing entries to
* the metaslab's space map). It helps coordinate readers of
* the metaslab's space map [see spa_vdev_remove_thread()]
* with writers [see metaslab_sync() or metaslab_flush()].
*
* Note that metaslab_load(), even though a reader, uses
* a completely different mechanism to deal with the reading
* of the metaslab's space map based on ms_synced_length. That
* said, the function still uses the ms_sync_lock after it
* has read the ms_sm [see relevant comment in metaslab_load()
* as to why].
*/
kmutex_t ms_sync_lock;
kcondvar_t ms_load_cv;
space_map_t *ms_sm;
uint64_t ms_id;
uint64_t ms_start;
uint64_t ms_size;
uint64_t ms_fragmentation;
range_tree_t *ms_allocating[TXG_SIZE];
range_tree_t *ms_allocatable;
uint64_t ms_allocated_this_txg;
uint64_t ms_allocating_total;
/*
* The following range trees are accessed only from syncing context.
* ms_free* trees only have entries while syncing, and are empty
* between syncs.
*/
range_tree_t *ms_freeing; /* to free this syncing txg */
range_tree_t *ms_freed; /* already freed this syncing txg */
range_tree_t *ms_defer[TXG_DEFER_SIZE];
range_tree_t *ms_checkpointing; /* to add to the checkpoint */
/*
* The ms_trim tree is the set of allocatable segments which are
* eligible for trimming. (When the metaslab is loaded, it's a
* subset of ms_allocatable.) It's kept in-core as long as the
* autotrim property is set and is not vacated when the metaslab
* is unloaded. Its purpose is to aggregate freed ranges to
* facilitate efficient trimming.
*/
range_tree_t *ms_trim;
boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted;
/*
* The number of consumers which have disabled the metaslab.
*/
uint64_t ms_disabled;
/*
* We must always hold the ms_lock when modifying ms_loaded
* and ms_loading.
*/
boolean_t ms_loaded;
boolean_t ms_loading;
kcondvar_t ms_flush_cv;
boolean_t ms_flushing;
/*
* The following histograms count entries that are in the
* metaslab's space map (and its histogram) but are not in
* ms_allocatable yet, because they are in ms_freed, ms_freeing,
* or ms_defer[].
*
* When the metaslab is not loaded, its ms_weight needs to
* reflect what is allocatable (i.e. what will be part of
* ms_allocatable if it is loaded). The weight is computed from
* the spacemap histogram, but that includes ranges that are
* not yet allocatable (because they are in ms_freed,
* ms_freeing, or ms_defer[]). Therefore, when calculating the
* weight, we need to remove those ranges.
*
* The ranges in the ms_freed and ms_defer[] range trees are all
* present in the spacemap. However, the spacemap may have
* multiple entries to represent a contiguous range, because it
* is written across multiple sync passes, but the changes of
* all sync passes are consolidated into the range trees.
* Adjacent ranges that are freed in different sync passes of
* one txg will be represented separately (as 2 or more entries)
* in the space map (and its histogram), but these adjacent
* ranges will be consolidated (represented as one entry) in the
* ms_freed/ms_defer[] range trees (and their histograms).
*
* When calculating the weight, we cannot simply subtract the
* range trees' histograms from the spacemap's histogram,
* because the range trees' histograms may have entries in
* higher buckets than the spacemap, due to consolidation.
* Instead we must subtract the exact entries that were added to
* the spacemap's histogram. ms_synchist and ms_deferhist[]
* represent these exact entries, so we can subtract them from
* the spacemap's histogram when calculating ms_weight.
*
* ms_synchist represents the same ranges as ms_freeing +
* ms_freed, but without consolidation across sync passes.
*
* ms_deferhist[i] represents the same ranges as ms_defer[i],
* but without consolidation across sync passes.
*/
uint64_t ms_synchist[SPACE_MAP_HISTOGRAM_SIZE];
uint64_t ms_deferhist[TXG_DEFER_SIZE][SPACE_MAP_HISTOGRAM_SIZE];
/*
* Tracks the exact amount of allocated space of this metaslab
* (and specifically the metaslab's space map) up to the most
* recently completed sync pass [see usage in metaslab_sync()].
*/
uint64_t ms_allocated_space;
int64_t ms_deferspace; /* sum of ms_defermap[] space */
uint64_t ms_weight; /* weight vs. others in group */
uint64_t ms_activation_weight; /* activation weight */
/*
* Tracks when a metaslab was last selected for loading or allocation.
* We use this value to determine how long the metaslab should
* stay cached.
*/
uint64_t ms_selected_txg;
/*
* ms_load/unload_time can be used for performance monitoring
* (e.g. by dtrace or mdb).
*/
hrtime_t ms_load_time; /* time last loaded */
hrtime_t ms_unload_time; /* time last unloaded */
hrtime_t ms_selected_time; /* time last allocated from */
uint64_t ms_alloc_txg; /* last successful alloc (debug only) */
uint64_t ms_max_size; /* maximum allocatable size */
/*
* -1 if it's not active in an allocator, otherwise set to the allocator
* this metaslab is active for.
*/
int ms_allocator;
boolean_t ms_primary; /* Only valid if ms_allocator is not -1 */
/*
* The metaslab block allocators can optionally use a size-ordered
* range tree and/or an array of LBAs. Not all allocators use
* this functionality. The ms_allocatable_by_size should always
* contain the same number of segments as the ms_allocatable. The
* only difference is that the ms_allocatable_by_size is ordered by
* segment sizes.
*/
zfs_btree_t ms_allocatable_by_size;
zfs_btree_t ms_unflushed_frees_by_size;
uint64_t ms_lbas[MAX_LBAS];
metaslab_group_t *ms_group; /* metaslab group */
avl_node_t ms_group_node; /* node in metaslab group tree */
txg_node_t ms_txg_node; /* per-txg dirty metaslab links */
avl_node_t ms_spa_txg_node; /* node in spa_metaslabs_by_txg */
/*
* Node in metaslab class's selected txg list
*/
multilist_node_t ms_class_txg_node;
/*
* Allocs and frees that are committed to the vdev log spacemap but
* not yet to this metaslab's spacemap.
*/
range_tree_t *ms_unflushed_allocs;
range_tree_t *ms_unflushed_frees;
/*
* We have flushed entries up to but not including this TXG. In
* other words, all changes from this TXG and onward should not
* be in this metaslab's space map and must be read from the
* log space maps.
*/
uint64_t ms_unflushed_txg;
boolean_t ms_unflushed_dirty;
/* updated every time we are done syncing the metaslab's space map */
uint64_t ms_synced_length;
boolean_t ms_new;
};
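/*
 * Illustration (not part of the original header): the histogram
 * adjustment described for ms_synchist/ms_deferhist above. The real
 * logic lives in the metaslab weight code (metaslab_weight_from_spacemap()
 * in metaslab.c); the direct sm_phys dereference below is a
 * simplification.
 */
static inline void
ms_adjusted_histogram_sketch(const metaslab_t *msp,
    uint64_t hist[SPACE_MAP_HISTOGRAM_SIZE])
{
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		/* Start from the on-disk space map's histogram... */
		hist[i] = msp->ms_sm->sm_phys->smp_histogram[i];
		/* ...and subtract the not-yet-allocatable entries. */
		hist[i] -= msp->ms_synchist[i];
		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			hist[i] -= msp->ms_deferhist[t][i];
	}
}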
typedef struct metaslab_unflushed_phys {
/* on-disk counterpart of ms_unflushed_txg */
uint64_t msp_unflushed_txg;
} metaslab_unflushed_phys_t;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_METASLAB_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/spa_impl.h b/sys/contrib/openzfs/include/sys/spa_impl.h
index 44afa763283a..588c72f6e4fa 100644
--- a/sys/contrib/openzfs/include/sys/spa_impl.h
+++ b/sys/contrib/openzfs/include/sys/spa_impl.h
@@ -1,474 +1,475 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019 Datto Inc.
*/
#ifndef _SYS_SPA_IMPL_H
#define _SYS_SPA_IMPL_H
#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/zfs_refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <sys/dsl_deadlist.h>
#include <zfeature_common.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct spa_alloc {
kmutex_t spaa_lock;
avl_tree_t spaa_tree;
} ____cacheline_aligned spa_alloc_t;
typedef struct spa_error_entry {
zbookmark_phys_t se_bookmark;
char *se_name;
avl_node_t se_avl;
zbookmark_err_phys_t se_zep; /* not accounted in avl_find */
} spa_error_entry_t;
typedef struct spa_history_phys {
uint64_t sh_pool_create_len; /* ending offset of zpool create */
uint64_t sh_phys_max_off; /* physical EOF */
uint64_t sh_bof; /* logical BOF */
uint64_t sh_eof; /* logical EOF */
uint64_t sh_records_lost; /* num of records overwritten */
} spa_history_phys_t;
/*
* All members must be uint64_t, for byteswap purposes.
*/
typedef struct spa_removing_phys {
uint64_t sr_state; /* dsl_scan_state_t */
/*
* The vdev ID that we most recently attempted to remove,
* or -1 if no removal has been attempted.
*/
uint64_t sr_removing_vdev;
/*
* The vdev ID that we most recently successfully removed,
* or -1 if no devices have been removed.
*/
uint64_t sr_prev_indirect_vdev;
uint64_t sr_start_time;
uint64_t sr_end_time;
/*
* Note that we cannot use the space map's or indirect mapping's
* accounting as a substitute for these values, because we need to
* count frees of not-yet-copied data as though it did the copy.
* Otherwise, we could get into a situation where copied > to_copy,
* or we complete before copied == to_copy.
*/
uint64_t sr_to_copy; /* bytes that need to be copied */
uint64_t sr_copied; /* bytes that have been copied or freed */
} spa_removing_phys_t;
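/*
 * Illustration (not part of the original header): because sr_copied
 * counts bytes copied *or freed*, it converges on sr_to_copy even when
 * data is freed mid-removal, so progress reporting can be a simple
 * ratio. sr_percent_done_sketch() is a hypothetical helper.
 */
static inline uint64_t
sr_percent_done_sketch(const spa_removing_phys_t *srp)
{
	if (srp->sr_to_copy == 0)
		return (100);
	return (srp->sr_copied * 100 / srp->sr_to_copy);
}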
/*
* This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
* (with key DMU_POOL_CONDENSING_INDIRECT). It is present if a condense
* of an indirect vdev's mapping object is in progress.
*/
typedef struct spa_condensing_indirect_phys {
/*
* The vdev ID of the indirect vdev whose indirect mapping is
* being condensed.
*/
uint64_t scip_vdev;
/*
* The vdev's old obsolete spacemap. This spacemap's contents are
* being integrated into the new mapping.
*/
uint64_t scip_prev_obsolete_sm_object;
/*
* The new mapping object that is being created.
*/
uint64_t scip_next_mapping_object;
} spa_condensing_indirect_phys_t;
struct spa_aux_vdev {
uint64_t sav_object; /* MOS object for device list */
nvlist_t *sav_config; /* cached device config */
vdev_t **sav_vdevs; /* devices */
int sav_count; /* number of devices */
boolean_t sav_sync; /* sync the device list */
nvlist_t **sav_pending; /* pending device additions */
uint_t sav_npending; /* # pending devices */
};
typedef struct spa_config_lock {
kmutex_t scl_lock;
kthread_t *scl_writer;
int scl_write_wanted;
int scl_count;
kcondvar_t scl_cv;
} ____cacheline_aligned spa_config_lock_t;
typedef struct spa_config_dirent {
list_node_t scd_link;
char *scd_path;
} spa_config_dirent_t;
typedef enum zio_taskq_type {
ZIO_TASKQ_ISSUE = 0,
ZIO_TASKQ_ISSUE_HIGH,
ZIO_TASKQ_INTERRUPT,
ZIO_TASKQ_INTERRUPT_HIGH,
ZIO_TASKQ_TYPES
} zio_taskq_type_t;
/*
* State machine for the zpool-poolname process. The state transitions
* are done as follows:
*
* From To Routine
* PROC_NONE -> PROC_CREATED spa_activate()
* PROC_CREATED -> PROC_ACTIVE spa_thread()
* PROC_ACTIVE -> PROC_DEACTIVATE spa_deactivate()
* PROC_DEACTIVATE -> PROC_GONE spa_thread()
* PROC_GONE -> PROC_NONE spa_deactivate()
*/
typedef enum spa_proc_state {
SPA_PROC_NONE, /* spa_proc = &p0, no process created */
SPA_PROC_CREATED, /* spa_activate() has proc, is waiting */
SPA_PROC_ACTIVE, /* taskqs created, spa_proc set */
SPA_PROC_DEACTIVATE, /* spa_deactivate() requests process exit */
SPA_PROC_GONE /* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
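/*
 * Illustration (not part of the original header): the transition table
 * above encoded as a predicate, e.g. for assertions. This is a
 * hypothetical sketch, not code from spa.c.
 */
static inline boolean_t
spa_proc_transition_ok_sketch(spa_proc_state_t from, spa_proc_state_t to)
{
	switch (from) {
	case SPA_PROC_NONE:
		return (to == SPA_PROC_CREATED);	/* spa_activate() */
	case SPA_PROC_CREATED:
		return (to == SPA_PROC_ACTIVE);		/* spa_thread() */
	case SPA_PROC_ACTIVE:
		return (to == SPA_PROC_DEACTIVATE);	/* spa_deactivate() */
	case SPA_PROC_DEACTIVATE:
		return (to == SPA_PROC_GONE);		/* spa_thread() */
	case SPA_PROC_GONE:
		return (to == SPA_PROC_NONE);		/* spa_deactivate() */
	default:
		return (B_FALSE);
	}
}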
typedef struct spa_taskqs {
uint_t stqs_count;
taskq_t **stqs_taskq;
} spa_taskqs_t;
typedef enum spa_all_vdev_zap_action {
AVZ_ACTION_NONE = 0,
AVZ_ACTION_DESTROY, /* Destroy all per-vdev ZAPs and the AVZ. */
AVZ_ACTION_REBUILD, /* Populate the new AVZ, see spa_avz_rebuild */
AVZ_ACTION_INITIALIZE
} spa_avz_action_t;
typedef enum spa_config_source {
SPA_CONFIG_SRC_NONE = 0,
SPA_CONFIG_SRC_SCAN, /* scan of path (default: /dev/dsk) */
SPA_CONFIG_SRC_CACHEFILE, /* any cachefile */
SPA_CONFIG_SRC_TRYIMPORT, /* returned from call to tryimport */
SPA_CONFIG_SRC_SPLIT, /* new pool in a pool split */
SPA_CONFIG_SRC_MOS /* MOS, but not always from right txg */
} spa_config_source_t;
struct spa {
/*
* Fields protected by spa_namespace_lock.
*/
char spa_name[ZFS_MAX_DATASET_NAME_LEN]; /* pool name */
char *spa_comment; /* comment */
avl_node_t spa_avl; /* node in spa_namespace_avl */
nvlist_t *spa_config; /* last synced config */
nvlist_t *spa_config_syncing; /* currently syncing config */
nvlist_t *spa_config_splitting; /* config for splitting */
nvlist_t *spa_load_info; /* info and errors from load */
uint64_t spa_config_txg; /* txg of last config change */
uint32_t spa_sync_pass; /* iterate-to-convergence */
pool_state_t spa_state; /* pool state */
int spa_inject_ref; /* injection references */
uint8_t spa_sync_on; /* sync threads are running */
spa_load_state_t spa_load_state; /* current load operation */
boolean_t spa_indirect_vdevs_loaded; /* mappings loaded? */
boolean_t spa_trust_config; /* do we trust vdev tree? */
boolean_t spa_is_splitting; /* in the middle of a split? */
spa_config_source_t spa_config_source; /* where config comes from? */
uint64_t spa_import_flags; /* import specific flags */
spa_taskqs_t spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
dsl_pool_t *spa_dsl_pool;
boolean_t spa_is_initializing; /* true while opening pool */
boolean_t spa_is_exporting; /* true while exporting pool */
metaslab_class_t *spa_normal_class; /* normal data class */
metaslab_class_t *spa_log_class; /* intent log data class */
metaslab_class_t *spa_embedded_log_class; /* log on normal vdevs */
metaslab_class_t *spa_special_class; /* special allocation class */
metaslab_class_t *spa_dedup_class; /* dedup allocation class */
uint64_t spa_first_txg; /* first txg after spa_open() */
uint64_t spa_final_txg; /* txg of export/destroy */
uint64_t spa_freeze_txg; /* freeze pool at this txg */
uint64_t spa_load_max_txg; /* best initial ub_txg */
uint64_t spa_claim_max_txg; /* highest claimed birth txg */
inode_timespec_t spa_loaded_ts; /* 1st successful open time */
objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */
kmutex_t spa_evicting_os_lock; /* Evicting objset list lock */
list_t spa_evicting_os_list; /* Objsets being evicted. */
kcondvar_t spa_evicting_os_cv; /* Objset Eviction Completion */
txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */
vdev_t *spa_root_vdev; /* top-level vdev container */
uint64_t spa_min_ashift; /* of vdevs in normal class */
uint64_t spa_max_ashift; /* of vdevs in normal class */
uint64_t spa_min_alloc; /* of vdevs in normal class */
+ uint64_t spa_gcd_alloc; /* of vdevs in normal class */
uint64_t spa_config_guid; /* config pool guid */
uint64_t spa_load_guid; /* spa_load initialized guid */
uint64_t spa_last_synced_guid; /* last synced guid */
list_t spa_config_dirty_list; /* vdevs with dirty config */
list_t spa_state_dirty_list; /* vdevs with dirty state */
/*
* spa_allocs is an array whose length is stored in spa_alloc_count.
* There is one tree and one lock for each allocator, to help improve
* allocation performance in write-heavy workloads.
*/
spa_alloc_t *spa_allocs;
int spa_alloc_count;
spa_aux_vdev_t spa_spares; /* hot spares */
spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */
nvlist_t *spa_label_features; /* Features for reading MOS */
uint64_t spa_config_object; /* MOS object for pool config */
uint64_t spa_config_generation; /* config generation number */
uint64_t spa_syncing_txg; /* txg currently syncing */
bpobj_t spa_deferred_bpobj; /* deferred-free bplist */
bplist_t spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
zio_cksum_salt_t spa_cksum_salt; /* secret salt for cksum */
/* checksum context templates */
kmutex_t spa_cksum_tmpls_lock;
void *spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
uberblock_t spa_ubsync; /* last synced uberblock */
uberblock_t spa_uberblock; /* current uberblock */
boolean_t spa_extreme_rewind; /* rewind past deferred frees */
kmutex_t spa_scrub_lock; /* resilver/scrub lock */
uint64_t spa_scrub_inflight; /* in-flight scrub bytes */
/* in-flight verification bytes */
uint64_t spa_load_verify_bytes;
kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */
uint8_t spa_scrub_active; /* active or suspended? */
uint8_t spa_scrub_type; /* type of scrub we're doing */
uint8_t spa_scrub_finished; /* indicator to rotate logs */
uint8_t spa_scrub_started; /* started since last boot */
uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */
uint64_t spa_scan_pass_start; /* start time per pass/reboot */
uint64_t spa_scan_pass_scrub_pause; /* scrub pause time */
uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */
uint64_t spa_scan_pass_exam; /* examined bytes per pass */
uint64_t spa_scan_pass_issued; /* issued bytes per pass */
/* error scrub pause time in milliseconds */
uint64_t spa_scan_pass_errorscrub_pause;
/* total error scrub paused time in milliseconds */
uint64_t spa_scan_pass_errorscrub_spent_paused;
/*
* We are in the middle of a resilver, and another resilver
* is needed once this one completes. This is set iff any
* vdev_resilver_deferred is set.
*/
boolean_t spa_resilver_deferred;
kmutex_t spa_async_lock; /* protect async state */
kthread_t *spa_async_thread; /* thread doing async task */
int spa_async_suspended; /* async tasks suspended */
kcondvar_t spa_async_cv; /* wait for thread_exit() */
uint16_t spa_async_tasks; /* async task mask */
uint64_t spa_missing_tvds; /* unopenable tvds on load */
uint64_t spa_missing_tvds_allowed; /* allow loading spa? */
uint64_t spa_nonallocating_dspace;
spa_removing_phys_t spa_removing_phys;
spa_vdev_removal_t *spa_vdev_removal;
spa_condensing_indirect_phys_t spa_condensing_indirect_phys;
spa_condensing_indirect_t *spa_condensing_indirect;
zthr_t *spa_condense_zthr; /* zthr doing condense. */
uint64_t spa_checkpoint_txg; /* the txg of the checkpoint */
spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
zthr_t *spa_checkpoint_discard_zthr;
space_map_t *spa_syncing_log_sm; /* current log space map */
avl_tree_t spa_sm_logs_by_txg;
kmutex_t spa_flushed_ms_lock; /* for metaslabs_by_flushed */
avl_tree_t spa_metaslabs_by_flushed;
spa_unflushed_stats_t spa_unflushed_stats;
list_t spa_log_summary;
uint64_t spa_log_flushall_txg;
zthr_t *spa_livelist_delete_zthr; /* deleting livelists */
zthr_t *spa_livelist_condense_zthr; /* condensing livelists */
uint64_t spa_livelists_to_delete; /* set of livelists to free */
livelist_condense_entry_t spa_to_condense; /* next to condense */
char *spa_root; /* alternate root directory */
uint64_t spa_ena; /* spa-wide ereport ENA */
int spa_last_open_failed; /* error if last open failed */
uint64_t spa_last_ubsync_txg; /* "best" uberblock txg */
uint64_t spa_last_ubsync_txg_ts; /* timestamp from that ub */
uint64_t spa_load_txg; /* ub txg that loaded */
uint64_t spa_load_txg_ts; /* timestamp from that ub */
uint64_t spa_load_meta_errors; /* verify metadata err count */
uint64_t spa_load_data_errors; /* verify data err count */
uint64_t spa_verify_min_txg; /* start txg of verify scrub */
kmutex_t spa_errlog_lock; /* error log lock */
uint64_t spa_errlog_last; /* last error log object */
uint64_t spa_errlog_scrub; /* scrub error log object */
kmutex_t spa_errlist_lock; /* error list/ereport lock */
avl_tree_t spa_errlist_last; /* last error list */
avl_tree_t spa_errlist_scrub; /* scrub error list */
avl_tree_t spa_errlist_healed; /* list of healed blocks */
uint64_t spa_deflate; /* should we deflate? */
uint64_t spa_history; /* history object */
kmutex_t spa_history_lock; /* history lock */
vdev_t *spa_pending_vdev; /* pending vdev additions */
kmutex_t spa_props_lock; /* property lock */
uint64_t spa_pool_props_object; /* object for properties */
uint64_t spa_bootfs; /* default boot filesystem */
uint64_t spa_failmode; /* failure mode for the pool */
uint64_t spa_deadman_failmode; /* failure mode for deadman */
uint64_t spa_delegation; /* delegation on/off */
list_t spa_config_list; /* previous cache file(s) */
/* per-CPU array of root of async I/O: */
zio_t **spa_async_zio_root;
zio_t *spa_suspend_zio_root; /* root of all suspended I/O */
zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
kmutex_t spa_suspend_lock; /* protects suspend_zio_root */
kcondvar_t spa_suspend_cv; /* notification of resume */
zio_suspend_reason_t spa_suspended; /* pool is suspended */
uint8_t spa_claiming; /* pool is doing zil_claim() */
boolean_t spa_is_root; /* pool is root */
int spa_minref; /* num refs when first opened */
spa_mode_t spa_mode; /* SPA_MODE_{READ|WRITE} */
boolean_t spa_read_spacemaps; /* spacemaps available if ro */
spa_log_state_t spa_log_state; /* log state */
uint64_t spa_autoexpand; /* lun expansion on/off */
ddt_t *spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
uint64_t spa_ddt_stat_object; /* DDT statistics */
uint64_t spa_dedup_dspace; /* Cache get_dedup_dspace() */
uint64_t spa_dedup_checksum; /* default dedup checksum */
uint64_t spa_dspace; /* dspace in normal class */
struct brt *spa_brt; /* in-core BRT */
kmutex_t spa_vdev_top_lock; /* dueling offline/remove */
kmutex_t spa_proc_lock; /* protects spa_proc* */
kcondvar_t spa_proc_cv; /* spa_proc_state transitions */
spa_proc_state_t spa_proc_state; /* see definition */
proc_t *spa_proc; /* "zpool-poolname" process */
uintptr_t spa_did; /* if procp != p0, did of t1 */
boolean_t spa_autoreplace; /* autoreplace set in open */
int spa_vdev_locks; /* locks grabbed */
uint64_t spa_creation_version; /* version at pool creation */
uint64_t spa_prev_software_version; /* See ub_software_version */
uint64_t spa_feat_for_write_obj; /* required to write to pool */
uint64_t spa_feat_for_read_obj; /* required to read from pool */
uint64_t spa_feat_desc_obj; /* Feature descriptions */
uint64_t spa_feat_enabled_txg_obj; /* Feature enabled txg */
kmutex_t spa_feat_stats_lock; /* protects spa_feat_stats */
nvlist_t *spa_feat_stats; /* Cache of enabled features */
/* cache feature refcounts */
uint64_t spa_feat_refcount_cache[SPA_FEATURES];
taskqid_t spa_deadman_tqid; /* Task id */
uint64_t spa_deadman_calls; /* number of deadman calls */
hrtime_t spa_sync_starttime; /* starting time of spa_sync */
uint64_t spa_deadman_synctime; /* deadman sync expiration */
uint64_t spa_deadman_ziotime; /* deadman zio expiration */
uint64_t spa_all_vdev_zaps; /* ZAP of per-vd ZAP obj #s */
spa_avz_action_t spa_avz_action; /* destroy/rebuild AVZ? */
uint64_t spa_autotrim; /* automatic background trim? */
uint64_t spa_errata; /* errata issues detected */
spa_stats_t spa_stats; /* assorted spa statistics */
spa_keystore_t spa_keystore; /* loaded crypto keys */
/* arc_memory_throttle() parameters during low memory condition */
uint64_t spa_lowmem_page_load; /* memory load during txg */
uint64_t spa_lowmem_last_txg; /* txg window start */
hrtime_t spa_ccw_fail_time; /* Conf cache write fail time */
taskq_t *spa_zvol_taskq; /* Taskq for minor management */
taskq_t *spa_prefetch_taskq; /* Taskq for prefetch threads */
uint64_t spa_multihost; /* multihost aware (mmp) */
mmp_thread_t spa_mmp; /* multihost mmp thread */
list_t spa_leaf_list; /* list of leaf vdevs */
uint64_t spa_leaf_list_gen; /* track leaf_list changes */
uint32_t spa_hostid; /* cached system hostid */
/* synchronization for threads in spa_wait */
kmutex_t spa_activities_lock;
kcondvar_t spa_activities_cv;
kcondvar_t spa_waiters_cv;
int spa_waiters; /* number of waiting threads */
boolean_t spa_waiters_cancel; /* waiters should return */
char *spa_compatibility; /* compatibility file(s) */
/*
* spa_refcount & spa_config_lock must be the last elements
* because zfs_refcount_t changes size based on compilation options.
* In order for the MDB module to function correctly, the other
* fields must remain in the same location.
*/
spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
zfs_refcount_t spa_refcount; /* number of opens */
taskq_t *spa_upgrade_taskq; /* taskq for upgrade jobs */
};
extern char *spa_config_path;
extern const char *zfs_deadman_failmode;
extern uint_t spa_slop_shift;
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags);
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
const char *name);
extern void spa_event_post(sysevent_t *ev);
extern int param_set_deadman_failmode_common(const char *val);
extern void spa_set_deadman_synctime(hrtime_t ns);
extern void spa_set_deadman_ziotime(hrtime_t ns);
extern const char *spa_history_zone(void);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_impl.h b/sys/contrib/openzfs/include/sys/vdev_impl.h
index 2b22b973ba49..ad9dc3aefd8e 100644
--- a/sys/contrib/openzfs/include/sys/vdev_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_impl.h
@@ -1,649 +1,649 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#ifndef _SYS_VDEV_IMPL_H
#define _SYS_VDEV_IMPL_H
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/metaslab.h>
#include <sys/nvpair.h>
#include <sys/space_map.h>
#include <sys/vdev.h>
#include <sys/dkio.h>
#include <sys/uberblock_impl.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/zfs_ratelimit.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Virtual device descriptors.
*
* All storage pool operations go through the virtual device framework,
* which provides data replication and I/O scheduling.
*/
/*
* Forward declarations that lots of things need.
*/
typedef struct vdev_queue vdev_queue_t;
struct abd;
extern uint_t zfs_vdev_queue_depth_pct;
extern uint_t zfs_vdev_def_queue_depth;
extern uint_t zfs_vdev_async_write_max_active;
/*
* Virtual device operations
*/
typedef int vdev_init_func_t(spa_t *spa, nvlist_t *nv, void **tsd);
typedef void vdev_kobj_post_evt_func_t(vdev_t *vd);
typedef void vdev_fini_func_t(vdev_t *vd);
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
uint64_t *ashift, uint64_t *pshift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
typedef uint64_t vdev_min_asize_func_t(vdev_t *vd);
typedef uint64_t vdev_min_alloc_func_t(vdev_t *vd);
typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
typedef void vdev_hold_func_t(vdev_t *vd);
typedef void vdev_rele_func_t(vdev_t *vd);
typedef void vdev_remap_cb_t(uint64_t inner_offset, vdev_t *vd,
uint64_t offset, uint64_t size, void *arg);
typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
vdev_remap_cb_t callback, void *arg);
/*
* Given a target vdev, translates the logical range "in" to the physical
* range "res"
*/
typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *logical,
range_seg64_t *physical, range_seg64_t *remain);
typedef uint64_t vdev_rebuild_asize_func_t(vdev_t *vd, uint64_t start,
uint64_t size, uint64_t max_segment);
typedef void vdev_metaslab_init_func_t(vdev_t *vd, uint64_t *startp,
uint64_t *sizep);
typedef void vdev_config_generate_func_t(vdev_t *vd, nvlist_t *nv);
typedef uint64_t vdev_nparity_func_t(vdev_t *vd);
typedef uint64_t vdev_ndisks_func_t(vdev_t *vd);
typedef const struct vdev_ops {
vdev_init_func_t *vdev_op_init;
vdev_fini_func_t *vdev_op_fini;
vdev_open_func_t *vdev_op_open;
vdev_close_func_t *vdev_op_close;
vdev_asize_func_t *vdev_op_asize;
vdev_min_asize_func_t *vdev_op_min_asize;
vdev_min_alloc_func_t *vdev_op_min_alloc;
vdev_io_start_func_t *vdev_op_io_start;
vdev_io_done_func_t *vdev_op_io_done;
vdev_state_change_func_t *vdev_op_state_change;
vdev_need_resilver_func_t *vdev_op_need_resilver;
vdev_hold_func_t *vdev_op_hold;
vdev_rele_func_t *vdev_op_rele;
vdev_remap_func_t *vdev_op_remap;
vdev_xlation_func_t *vdev_op_xlate;
vdev_rebuild_asize_func_t *vdev_op_rebuild_asize;
vdev_metaslab_init_func_t *vdev_op_metaslab_init;
vdev_config_generate_func_t *vdev_op_config_generate;
vdev_nparity_func_t *vdev_op_nparity;
vdev_ndisks_func_t *vdev_op_ndisks;
vdev_kobj_post_evt_func_t *vdev_op_kobj_evt_post;
char vdev_op_type[16];
boolean_t vdev_op_leaf;
} vdev_ops_t;
/*
* Virtual device properties
*/
typedef union vdev_queue_class {
list_t vqc_list;
avl_tree_t vqc_tree;
} vdev_queue_class_t;
struct vdev_queue {
vdev_t *vq_vdev;
vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
avl_tree_t vq_read_offset_tree;
avl_tree_t vq_write_offset_tree;
uint64_t vq_last_offset;
zio_priority_t vq_last_prio; /* Last sent I/O priority. */
uint32_t vq_cqueued; /* Classes with queued I/Os. */
uint32_t vq_cactive[ZIO_PRIORITY_NUM_QUEUEABLE];
uint32_t vq_active; /* Number of active I/Os. */
uint32_t vq_ia_active; /* Active interactive I/Os. */
uint32_t vq_nia_credit; /* Non-interactive I/Os credit. */
list_t vq_active_list; /* List of active I/Os. */
hrtime_t vq_io_complete_ts; /* time last i/o completed */
hrtime_t vq_io_delta_ts;
zio_t vq_io_search; /* used as local for stack reduction */
kmutex_t vq_lock;
};
typedef enum vdev_alloc_bias {
VDEV_BIAS_NONE,
VDEV_BIAS_LOG, /* dedicated to ZIL data (SLOG) */
VDEV_BIAS_SPECIAL, /* dedicated to ddt, metadata, and small blks */
VDEV_BIAS_DEDUP /* dedicated to dedup metadata */
} vdev_alloc_bias_t;
/*
* On-disk indirect vdev state.
*
* An indirect vdev is described exclusively in the MOS config of a pool.
* The config for an indirect vdev includes several fields, which are
* accessed in memory by a vdev_indirect_config_t.
*/
typedef struct vdev_indirect_config {
/*
* Object (in MOS) which contains the indirect mapping. This object
* contains an array of vdev_indirect_mapping_entry_phys_t ordered by
* vimep_src. The bonus buffer for this object is a
* vdev_indirect_mapping_phys_t. This object is allocated when a vdev
* removal is initiated.
*
* Note that this object can be empty if none of the data on the vdev
* has been copied yet.
*/
uint64_t vic_mapping_object;
/*
* Object (in MOS) which contains the birth times for the mapping
* entries. This object contains an array of
* vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
* buffer for this object is a vdev_indirect_birth_phys_t. This object
* is allocated when a vdev removal is initiated.
*
* Note that this object can be empty if none of the vdev's data has
* been copied yet.
*/
uint64_t vic_births_object;
/*
* This is the vdev ID which was removed prior to this vdev, or
* UINT64_MAX if there are no previously removed vdevs.
*/
uint64_t vic_prev_indirect_vdev;
} vdev_indirect_config_t;
/*
* Virtual device descriptor
*/
struct vdev {
/*
* Common to all vdev types.
*/
uint64_t vdev_id; /* child number in vdev parent */
uint64_t vdev_guid; /* unique ID for this vdev */
uint64_t vdev_guid_sum; /* self guid + all child guids */
uint64_t vdev_orig_guid; /* orig. guid prior to remove */
uint64_t vdev_asize; /* allocatable device capacity */
uint64_t vdev_min_asize; /* min acceptable asize */
uint64_t vdev_max_asize; /* max acceptable asize */
uint64_t vdev_ashift; /* block alignment shift */
/*
* Logical block alignment shift
*
* The smallest sized/aligned I/O supported by the device.
*/
uint64_t vdev_logical_ashift;
/*
* Physical block alignment shift
*
* The device supports logical I/Os with vdev_logical_ashift
* size/alignment, but optimum performance will be achieved by
* aligning/sizing requests to vdev_physical_ashift. Smaller
* requests may be inflated or incur device level read-modify-write
* operations.
*
* May be 0 to indicate no preference (i.e. use vdev_logical_ashift).
*/
uint64_t vdev_physical_ashift;
uint64_t vdev_state; /* see VDEV_STATE_* #defines */
uint64_t vdev_prevstate; /* used when reopening a vdev */
vdev_ops_t *vdev_ops; /* vdev operations */
spa_t *vdev_spa; /* spa for this vdev */
void *vdev_tsd; /* type-specific data */
vdev_t *vdev_top; /* top-level vdev */
vdev_t *vdev_parent; /* parent vdev */
vdev_t **vdev_child; /* array of children */
uint64_t vdev_children; /* number of children */
vdev_stat_t vdev_stat; /* virtual device statistics */
vdev_stat_ex_t vdev_stat_ex; /* extended statistics */
boolean_t vdev_expanding; /* expand the vdev? */
boolean_t vdev_reopening; /* reopen in progress? */
boolean_t vdev_nonrot; /* true if solid state */
int vdev_load_error; /* error on last load */
int vdev_open_error; /* error on last open */
int vdev_validate_error; /* error on last validate */
kthread_t *vdev_open_thread; /* thread opening children */
kthread_t *vdev_validate_thread; /* thread validating children */
uint64_t vdev_crtxg; /* txg when top-level was added */
uint64_t vdev_root_zap;
/*
* Top-level vdev state.
*/
uint64_t vdev_ms_array; /* metaslab array object */
uint64_t vdev_ms_shift; /* metaslab size shift */
uint64_t vdev_ms_count; /* number of metaslabs */
metaslab_group_t *vdev_mg; /* metaslab group */
metaslab_group_t *vdev_log_mg; /* embedded slog metaslab group */
metaslab_t **vdev_ms; /* metaslab array */
- uint64_t vdev_pending_fastwrite; /* allocated fastwrites */
txg_list_t vdev_ms_list; /* per-txg dirty metaslab lists */
txg_list_t vdev_dtl_list; /* per-txg dirty DTL lists */
txg_node_t vdev_txg_node; /* per-txg dirty vdev linkage */
boolean_t vdev_remove_wanted; /* async remove wanted? */
boolean_t vdev_probe_wanted; /* async probe wanted? */
list_node_t vdev_config_dirty_node; /* config dirty list */
list_node_t vdev_state_dirty_node; /* state dirty list */
uint64_t vdev_deflate_ratio; /* deflation ratio (x512) */
uint64_t vdev_islog; /* is an intent log device */
uint64_t vdev_noalloc; /* device is passivated? */
uint64_t vdev_removing; /* device is being removed? */
uint64_t vdev_failfast; /* device failfast setting */
boolean_t vdev_ishole; /* is a hole in the namespace */
uint64_t vdev_top_zap;
vdev_alloc_bias_t vdev_alloc_bias; /* metaslab allocation bias */
/* pool checkpoint related */
space_map_t *vdev_checkpoint_sm; /* contains reserved blocks */
/* Initialize related */
boolean_t vdev_initialize_exit_wanted;
vdev_initializing_state_t vdev_initialize_state;
list_node_t vdev_initialize_node;
kthread_t *vdev_initialize_thread;
/* Protects vdev_initialize_thread and vdev_initialize_state. */
kmutex_t vdev_initialize_lock;
kcondvar_t vdev_initialize_cv;
uint64_t vdev_initialize_offset[TXG_SIZE];
uint64_t vdev_initialize_last_offset;
range_tree_t *vdev_initialize_tree; /* valid while initializing */
uint64_t vdev_initialize_bytes_est;
uint64_t vdev_initialize_bytes_done;
uint64_t vdev_initialize_action_time; /* start and end time */
/* TRIM related */
boolean_t vdev_trim_exit_wanted;
boolean_t vdev_autotrim_exit_wanted;
vdev_trim_state_t vdev_trim_state;
list_node_t vdev_trim_node;
kmutex_t vdev_autotrim_lock;
kcondvar_t vdev_autotrim_cv;
kcondvar_t vdev_autotrim_kick_cv;
kthread_t *vdev_autotrim_thread;
/* Protects vdev_trim_thread and vdev_trim_state. */
kmutex_t vdev_trim_lock;
kcondvar_t vdev_trim_cv;
kthread_t *vdev_trim_thread;
uint64_t vdev_trim_offset[TXG_SIZE];
uint64_t vdev_trim_last_offset;
uint64_t vdev_trim_bytes_est;
uint64_t vdev_trim_bytes_done;
uint64_t vdev_trim_rate; /* requested rate (bytes/sec) */
uint64_t vdev_trim_partial; /* requested partial TRIM */
uint64_t vdev_trim_secure; /* requested secure TRIM */
uint64_t vdev_trim_action_time; /* start and end time */
/* Rebuild related */
boolean_t vdev_rebuilding;
boolean_t vdev_rebuild_exit_wanted;
boolean_t vdev_rebuild_cancel_wanted;
boolean_t vdev_rebuild_reset_wanted;
kmutex_t vdev_rebuild_lock;
kcondvar_t vdev_rebuild_cv;
kthread_t *vdev_rebuild_thread;
vdev_rebuild_t vdev_rebuild_config;
/* For limiting outstanding I/Os (initialize, TRIM) */
kmutex_t vdev_initialize_io_lock;
kcondvar_t vdev_initialize_io_cv;
uint64_t vdev_initialize_inflight;
kmutex_t vdev_trim_io_lock;
kcondvar_t vdev_trim_io_cv;
uint64_t vdev_trim_inflight[3];
/*
* Values stored in the config for an indirect or removing vdev.
*/
vdev_indirect_config_t vdev_indirect_config;
/*
* The vdev_indirect_rwlock protects the vdev_indirect_mapping
* pointer from changing on indirect vdevs (when it is condensed).
* Note that removing (not yet indirect) vdevs have different
* access patterns (the mapping is not accessed from open context,
* e.g. from zio_read) and locking strategy (e.g. svr_lock).
*/
krwlock_t vdev_indirect_rwlock;
vdev_indirect_mapping_t *vdev_indirect_mapping;
vdev_indirect_births_t *vdev_indirect_births;
/*
* In memory data structures used to manage the obsolete sm, for
* indirect or removing vdevs.
*
* The vdev_obsolete_segments is the in-core record of the segments
* that are no longer referenced anywhere in the pool (due to
* being freed or remapped and not referenced by any snapshots).
* During a sync, segments are added to vdev_obsolete_segments
* via vdev_indirect_mark_obsolete(); at the end of each sync
* pass, this is appended to vdev_obsolete_sm via
* vdev_indirect_sync_obsolete(). The vdev_obsolete_lock
* protects against concurrent modifications of vdev_obsolete_segments
* from multiple zio threads.
*/
kmutex_t vdev_obsolete_lock;
range_tree_t *vdev_obsolete_segments;
space_map_t *vdev_obsolete_sm;
/*
* Protects the vdev_scan_io_queue field itself as well as the
* structure's contents (when present).
*/
kmutex_t vdev_scan_io_queue_lock;
struct dsl_scan_io_queue *vdev_scan_io_queue;
/*
* Leaf vdev state.
*/
range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
space_map_t *vdev_dtl_sm; /* dirty time log space map */
txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */
uint64_t vdev_dtl_object; /* DTL object */
uint64_t vdev_psize; /* physical device capacity */
uint64_t vdev_wholedisk; /* true if this is a whole disk */
uint64_t vdev_offline; /* persistent offline state */
uint64_t vdev_faulted; /* persistent faulted state */
uint64_t vdev_degraded; /* persistent degraded state */
uint64_t vdev_removed; /* persistent removed state */
uint64_t vdev_resilver_txg; /* persistent resilvering state */
uint64_t vdev_rebuild_txg; /* persistent rebuilding state */
char *vdev_path; /* vdev path (if any) */
char *vdev_devid; /* vdev devid (if any) */
char *vdev_physpath; /* vdev device path (if any) */
char *vdev_enc_sysfs_path; /* enclosure sysfs path */
char *vdev_fru; /* physical FRU location */
uint64_t vdev_not_present; /* not present during import */
uint64_t vdev_unspare; /* unspare when resilvering done */
boolean_t vdev_nowritecache; /* true if flushwritecache failed */
boolean_t vdev_has_trim; /* TRIM is supported */
boolean_t vdev_has_securetrim; /* secure TRIM is supported */
boolean_t vdev_checkremove; /* temporary online test */
boolean_t vdev_forcefault; /* force online fault */
boolean_t vdev_splitting; /* split or repair in progress */
boolean_t vdev_delayed_close; /* delayed device close? */
boolean_t vdev_tmpoffline; /* device taken offline temporarily? */
boolean_t vdev_detached; /* device detached? */
boolean_t vdev_cant_read; /* vdev is failing all reads */
boolean_t vdev_cant_write; /* vdev is failing all writes */
boolean_t vdev_isspare; /* was a hot spare */
boolean_t vdev_isl2cache; /* was a l2cache device */
boolean_t vdev_copy_uberblocks; /* post expand copy uberblocks */
boolean_t vdev_resilver_deferred; /* resilver deferred */
boolean_t vdev_kobj_flag; /* kobj event record */
+ boolean_t vdev_attaching; /* vdev attach ashift handling */
vdev_queue_t vdev_queue; /* I/O deadline schedule queue */
spa_aux_vdev_t *vdev_aux; /* for l2cache and spares vdevs */
zio_t *vdev_probe_zio; /* root of current probe */
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
hrtime_t vdev_mmp_pending; /* 0 if write finished */
uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
uint64_t vdev_expansion_time; /* vdev's last expansion time */
list_node_t vdev_leaf_node; /* leaf vdev list */
/*
* For DTrace to work in userland (libzpool) context, these fields must
* remain at the end of the structure. DTrace will use the kernel's
* CTF definition for 'struct vdev', and since the size of a kmutex_t is
* larger in userland, the offsets for the rest of the fields would be
* incorrect.
*/
kmutex_t vdev_dtl_lock; /* vdev_dtl_{map,resilver} */
kmutex_t vdev_stat_lock; /* vdev_stat */
kmutex_t vdev_probe_lock; /* protects vdev_probe_zio */
/*
* We rate limit ZIO delay, deadman, and checksum events, since they
* can flood ZED with tons of events when a drive is acting up.
*/
zfs_ratelimit_t vdev_delay_rl;
zfs_ratelimit_t vdev_deadman_rl;
zfs_ratelimit_t vdev_checksum_rl;
/*
* Checksum and IO thresholds for tuning ZED
*/
uint64_t vdev_checksum_n;
uint64_t vdev_checksum_t;
uint64_t vdev_io_n;
uint64_t vdev_io_t;
};
#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_be) to skip */
#define VDEV_SKIP_SIZE (VDEV_PAD_SIZE * 2)
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)
/*
* MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
* ring when MMP is enabled.
*/
#define MMP_BLOCKS_PER_LABEL 1
/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd) \
MIN(MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT), \
MAX_UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd) \
(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n) \
offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
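/*
 * Worked example (not in the original header), assuming UBERBLOCK_SHIFT
 * is 10 (1 KiB minimum uberblock): with ashift 12 the shift is
 * MIN(MAX(12, 10), 13) = 12, so the 128 KiB ring holds 128K >> 12 = 32
 * uberblocks of 4 KiB each; with ashift 9 it clamps up to 10 (128
 * entries of 1 KiB), and with ashift 14 it clamps down to 13 (16
 * entries of 8 KiB, the largest supported uberblock).
 */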
typedef struct vdev_phys {
char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
zio_eck_t vp_zbt;
} vdev_phys_t;
typedef enum vbe_vers {
/*
* The bootenv file is stored as ascii text in the envblock.
* It is used by the GRUB bootloader used on Linux to store the
* contents of the grubenv file. The file is stored as raw ASCII,
* and is protected by an embedded checksum. By default, GRUB will
* check if the boot filesystem supports storing the environment data
* in a special location, and if so, will invoke filesystem specific
* logic to retrieve it. This can be overridden by a variable, should
* the user so desire.
*/
VB_RAW = 0,
/*
* The bootenv file is converted to an nvlist and then packed into the
* envblock.
*/
VB_NVLIST = 1
} vbe_vers_t;
typedef struct vdev_boot_envblock {
uint64_t vbe_version;
char vbe_bootenv[VDEV_PAD_SIZE - sizeof (uint64_t) -
sizeof (zio_eck_t)];
zio_eck_t vbe_zbt;
} vdev_boot_envblock_t;
_Static_assert(sizeof (vdev_boot_envblock_t) == VDEV_PAD_SIZE,
"vdev_boot_envblock_t wrong size");
typedef struct vdev_label {
char vl_pad1[VDEV_PAD_SIZE]; /* 8K */
vdev_boot_envblock_t vl_be; /* 8K */
vdev_phys_t vl_vdev_phys; /* 112K */
char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
} vdev_label_t; /* 256K total */
/*
* vdev_dirty() flags
*/
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02
/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
/*
* Size of embedded boot loader region on each label.
* The total size of the first two labels plus the boot area is 4MB.
*/
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
/*
* Size of label regions at the start and end of each leaf device.
*/
#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
#define VDEV_LABELS 4
#define VDEV_BEST_LABEL VDEV_LABELS
#define VDEV_OFFSET_IS_LABEL(vd, off) \
(((off) < VDEV_LABEL_START_SIZE) || \
((off) >= ((vd)->vdev_psize - VDEV_LABEL_END_SIZE)))
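/*
 * Illustrative compile-time checks (not in the original header), in
 * the same _Static_assert style used for vdev_boot_envblock_t above:
 * each label is 8K + 8K + 112K + 128K = 256K, and the front of the
 * device reserves 2 x 256K of labels plus 3.5M of boot area = 4M.
 */
_Static_assert(sizeof (vdev_label_t) == (256 << 10),
	"vdev_label_t must be 256 KiB");
_Static_assert(VDEV_LABEL_START_SIZE == (4ULL << 20),
	"two front labels plus the boot region must total 4 MiB");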
#define VDEV_ALLOC_LOAD 0
#define VDEV_ALLOC_ADD 1
#define VDEV_ALLOC_SPARE 2
#define VDEV_ALLOC_L2CACHE 3
#define VDEV_ALLOC_ROOTPOOL 4
#define VDEV_ALLOC_SPLIT 5
#define VDEV_ALLOC_ATTACH 6
/*
* Allocate or free a vdev
*/
extern vdev_t *vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid,
vdev_ops_t *ops);
extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config,
vdev_t *parent, uint_t id, int alloctype);
extern void vdev_free(vdev_t *vd);
/*
* Add or remove children and parents
*/
extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_compact_children(vdev_t *pvd);
extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops);
extern void vdev_remove_parent(vdev_t *cvd);
/*
* vdev sync load and sync
*/
extern boolean_t vdev_log_state_valid(vdev_t *vd);
extern int vdev_load(vdev_t *vd);
extern int vdev_dtl_load(vdev_t *vd);
extern void vdev_sync(vdev_t *vd, uint64_t txg);
extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);
/*
* Available vdev types.
*/
extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
extern vdev_ops_t vdev_draid_ops;
extern vdev_ops_t vdev_draid_spare_ops;
extern vdev_ops_t vdev_disk_ops;
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
extern vdev_ops_t vdev_hole_ops;
extern vdev_ops_t vdev_spare_ops;
extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
extern uint64_t vdev_default_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_alloc(vdev_t *vd);
extern uint64_t vdev_get_nparity(vdev_t *vd);
extern uint64_t vdev_get_ndisks(vdev_t *vd);
/*
* Global variables
*/
extern int zfs_vdev_standard_sm_blksz;
/*
* Functions from vdev_indirect.c
*/
extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
extern int vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj);
extern int vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise);
/*
* Other miscellaneous functions
*/
int vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj);
void vdev_metaslab_group_create(vdev_t *vd);
uint64_t vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b);
/*
* Vdev ashift optimization tunables
*/
extern uint_t zfs_vdev_min_auto_ashift;
extern uint_t zfs_vdev_max_auto_ashift;
int param_set_min_auto_ashift(ZFS_MODULE_PARAM_ARGS);
int param_set_max_auto_ashift(ZFS_MODULE_PARAM_ARGS);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/zil_impl.h b/sys/contrib/openzfs/include/sys/zil_impl.h
index 03a409c5257c..f780ad3d61bc 100644
--- a/sys/contrib/openzfs/include/sys/zil_impl.h
+++ b/sys/contrib/openzfs/include/sys/zil_impl.h
@@ -1,242 +1,256 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_ZIL_IMPL_H
#define _SYS_ZIL_IMPL_H
#include <sys/zil.h>
#include <sys/dmu_objset.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Possible states for a given lwb structure.
*
- * An lwb will start out in the "closed" state, and then transition to
- * the "opened" state via a call to zil_lwb_write_open(). When
- * transitioning from "closed" to "opened" the zilog's "zl_issuer_lock"
- * must be held.
+ * An lwb will start out in the "new" state, and transition to the "opened"
+ * state via a call to zil_lwb_write_open() on first itx assignment. When
+ * transitioning from "new" to "opened" the zilog's "zl_issuer_lock" must be
+ * held.
*
- * After the lwb is "opened", it can transition into the "issued" state
- * via zil_lwb_write_close(). Again, the zilog's "zl_issuer_lock" must
- * be held when making this transition.
+ * After the lwb is "opened", it can be assigned a number of itxs and transition
+ * into the "closed" state via zil_lwb_write_close() when full or on timeout.
+ * When transitioning from "opened" to "closed" the zilog's "zl_issuer_lock"
+ * must be held. New lwb allocation also takes "zl_lock" to protect the list.
+ *
+ * After the lwb is "closed", it can transition into the "ready" state via
+ * zil_lwb_write_issue(). "zl_lock" must be held when making this transition.
+ * Since it is done by the same thread, "zl_issuer_lock" is not needed.
+ *
+ * When an lwb in the "ready" state receives its block pointer, it can transition to
+ * "issued". "zl_lock" must be held when making this transition.
*
* After the lwb's write zio completes, it transitions into the "write
* done" state via zil_lwb_write_done(); and then into the "flush done"
* state via zil_lwb_flush_vdevs_done(). When transitioning from
* "issued" to "write done", and then from "write done" to "flush done",
* the zilog's "zl_lock" must be held, *not* the "zl_issuer_lock".
*
* The zilog's "zl_issuer_lock" can become heavily contended in certain
* workloads, so we specifically avoid acquiring that lock when
* transitioning an lwb from "issued" to "done". This allows us to avoid
* having to acquire the "zl_issuer_lock" for each lwb ZIO completion,
* which would have added more lock contention on an already heavily
* contended lock.
*
* Additionally, correctness when reading an lwb's state is often
* achieved by exploiting the fact that these state transitions occur in
- * this specific order; i.e. "closed" to "opened" to "issued" to "done".
+ * this specific order; i.e. "new" to "opened" to "closed" to "ready" to
+ * "issued" to "write_done" and finally "flush_done".
*
- * Thus, if an lwb is in the "closed" or "opened" state, holding the
+ * Thus, if an lwb is in the "new" or "opened" state, holding the
* "zl_issuer_lock" will prevent a concurrent thread from transitioning
- * that lwb to the "issued" state. Likewise, if an lwb is already in the
- * "issued" state, holding the "zl_lock" will prevent a concurrent
- * thread from transitioning that lwb to the "write done" state.
+ * that lwb to the "closed" state. Likewise, if an lwb is already in the
+ * "ready" state, holding the "zl_lock" will prevent a concurrent thread
+ * from transitioning that lwb to the "issued" state.
*/
typedef enum {
- LWB_STATE_CLOSED,
+ LWB_STATE_NEW,
LWB_STATE_OPENED,
+ LWB_STATE_CLOSED,
+ LWB_STATE_READY,
LWB_STATE_ISSUED,
LWB_STATE_WRITE_DONE,
LWB_STATE_FLUSH_DONE,
LWB_NUM_STATES
} lwb_state_t;
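/*
 * A minimal sketch (not part of this header) of exploiting the state
 * ordering above when reading lwb state; ASSERT3U() is the in-tree
 * assertion macro, and the helper name below is hypothetical.
 */
static inline void
lwb_assert_state_advanced(lwb_state_t from, lwb_state_t to)
{
	/* Transitions only ever move forward in the documented order. */
	ASSERT3U(from, <, to);
	ASSERT3U(to, <, LWB_NUM_STATES);
}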
/*
* Log write block (lwb)
*
* Prior to an lwb being issued to disk via zil_lwb_write_issue(), it
* will be protected by the zilog's "zl_issuer_lock". Basically, prior
* to it being issued, it will only be accessed by the thread that's
* holding the "zl_issuer_lock". After the lwb is issued, the zilog's
* "zl_lock" is used to protect the lwb against concurrent access.
*/
typedef struct lwb {
zilog_t *lwb_zilog; /* back pointer to log struct */
blkptr_t lwb_blk; /* on disk address of this log blk */
- boolean_t lwb_fastwrite; /* is blk marked for fastwrite? */
+ boolean_t lwb_slim; /* log block has slim format */
boolean_t lwb_slog; /* lwb_blk is on SLOG device */
- boolean_t lwb_indirect; /* do not postpone zil_lwb_commit() */
+ int lwb_error; /* log block allocation error */
+ int lwb_nmax; /* max bytes in the buffer */
int lwb_nused; /* # used bytes in buffer */
int lwb_nfilled; /* # filled bytes in buffer */
int lwb_sz; /* size of block and buffer */
lwb_state_t lwb_state; /* the state of this lwb */
char *lwb_buf; /* log write buffer */
+ zio_t *lwb_child_zio; /* parent zio for children */
zio_t *lwb_write_zio; /* zio for the lwb buffer */
zio_t *lwb_root_zio; /* root zio for lwb write and flushes */
hrtime_t lwb_issued_timestamp; /* when was the lwb issued? */
uint64_t lwb_issued_txg; /* the txg when the write is issued */
+ uint64_t lwb_alloc_txg; /* the txg when lwb_blk is allocated */
uint64_t lwb_max_txg; /* highest txg in this lwb */
list_node_t lwb_node; /* zilog->zl_lwb_list linkage */
list_node_t lwb_issue_node; /* linkage of lwbs ready for issue */
list_t lwb_itxs; /* list of itx's */
list_t lwb_waiters; /* list of zil_commit_waiter's */
avl_tree_t lwb_vdev_tree; /* vdevs to flush after lwb write */
kmutex_t lwb_vdev_lock; /* protects lwb_vdev_tree */
} lwb_t;
/*
* ZIL commit waiter.
*
* This structure is allocated each time zil_commit() is called, and is
* used by zil_commit() to communicate with other parts of the ZIL, such
 * that zil_commit() can know when it is safe for it to return. For more
* details, see the comment above zil_commit().
*
* The "zcw_lock" field is used to protect the commit waiter against
* concurrent access. This lock is often acquired while already holding
* the zilog's "zl_issuer_lock" or "zl_lock"; see the functions
* zil_process_commit_list() and zil_lwb_flush_vdevs_done() as examples
* of this. Thus, one must be careful not to acquire the
* "zl_issuer_lock" or "zl_lock" when already holding the "zcw_lock";
* e.g. see the zil_commit_waiter_timeout() function.
*/
typedef struct zil_commit_waiter {
kcondvar_t zcw_cv; /* signalled when "done" */
kmutex_t zcw_lock; /* protects fields of this struct */
list_node_t zcw_node; /* linkage in lwb_t:lwb_waiter list */
lwb_t *zcw_lwb; /* back pointer to lwb when linked */
boolean_t zcw_done; /* B_TRUE when "done", else B_FALSE */
int zcw_zio_error; /* contains the zio io_error value */
} zil_commit_waiter_t;
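/*
 * A sketch of the lock ordering described above: take "zl_lock" (or
 * "zl_issuer_lock") before "zcw_lock", never the reverse. The function
 * name is hypothetical; mutex_enter()/cv_broadcast() are the in-tree
 * primitives.
 */
static void
zcw_mark_done(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	mutex_enter(&zilog->zl_lock);
	mutex_enter(&zcw->zcw_lock);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
	mutex_exit(&zilog->zl_lock);
}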
/*
* Intent log transaction lists
*/
typedef struct itxs {
list_t i_sync_list; /* list of synchronous itxs */
avl_tree_t i_async_tree; /* tree of foids for async itxs */
} itxs_t;
typedef struct itxg {
kmutex_t itxg_lock; /* lock for this structure */
uint64_t itxg_txg; /* txg for this chain */
itxs_t *itxg_itxs; /* sync and async itxs */
} itxg_t;
/* for async nodes we build up an AVL tree of lists of async itxs per file */
typedef struct itx_async_node {
uint64_t ia_foid; /* file object id */
list_t ia_list; /* list of async itxs for this foid */
avl_node_t ia_node; /* AVL tree linkage */
} itx_async_node_t;
/*
* Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs
* we've touched so we know which ones need a write cache flush at the end.
*/
typedef struct zil_vdev_node {
uint64_t zv_vdev; /* vdev to be flushed */
avl_node_t zv_node; /* AVL tree linkage */
} zil_vdev_node_t;
#define ZIL_PREV_BLKS 16
/*
* Stable storage intent log management structure. One per dataset.
*/
struct zilog {
kmutex_t zl_lock; /* protects most zilog_t fields */
struct dsl_pool *zl_dmu_pool; /* DSL pool */
spa_t *zl_spa; /* handle for read/write log */
const zil_header_t *zl_header; /* log header buffer */
objset_t *zl_os; /* object set we're logging */
zil_get_data_t *zl_get_data; /* callback to get object content */
lwb_t *zl_last_lwb_opened; /* most recent lwb opened */
hrtime_t zl_last_lwb_latency; /* zio latency of last lwb done */
uint64_t zl_lr_seq; /* on-disk log record sequence number */
uint64_t zl_commit_lr_seq; /* last committed on-disk lr seq */
uint64_t zl_destroy_txg; /* txg of last zil_destroy() */
uint64_t zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
uint64_t zl_replaying_seq; /* current replay seq number */
uint32_t zl_suspend; /* log suspend count */
kcondvar_t zl_cv_suspend; /* log suspend completion */
uint8_t zl_suspending; /* log is currently suspending */
uint8_t zl_keep_first; /* keep first log block in destroy */
uint8_t zl_replay; /* replaying records while set */
uint8_t zl_stop_sync; /* for debugging */
kmutex_t zl_issuer_lock; /* single writer, per ZIL, at a time */
uint8_t zl_logbias; /* latency or throughput */
uint8_t zl_sync; /* synchronous or asynchronous */
int zl_parse_error; /* last zil_parse() error */
uint64_t zl_parse_blk_seq; /* highest blk seq on last parse */
uint64_t zl_parse_lr_seq; /* highest lr seq on last parse */
uint64_t zl_parse_blk_count; /* number of blocks parsed */
uint64_t zl_parse_lr_count; /* number of log records parsed */
itxg_t zl_itxg[TXG_SIZE]; /* intent log txg chains */
list_t zl_itx_commit_list; /* itx list to be committed */
uint64_t zl_cur_used; /* current commit log size used */
list_t zl_lwb_list; /* in-flight log write list */
avl_tree_t zl_bp_tree; /* track bps during log parse */
clock_t zl_replay_time; /* lbolt of when replay started */
uint64_t zl_replay_blks; /* number of log blocks replayed */
zil_header_t zl_old_header; /* debugging aid */
uint_t zl_prev_blks[ZIL_PREV_BLKS]; /* size - sector rounded */
uint_t zl_prev_rotor; /* rotor for zl_prev[] */
txg_node_t zl_dirty_link; /* protected by dp_dirty_zilogs list */
uint64_t zl_dirty_max_txg; /* highest txg used to dirty zilog */
kmutex_t zl_lwb_io_lock; /* protect following members */
uint64_t zl_lwb_inflight[TXG_SIZE]; /* io issued, but not done */
kcondvar_t zl_lwb_io_cv; /* signal when the flush is done */
uint64_t zl_lwb_max_issued_txg; /* max txg when lwb io issued */
/*
* Max block size for this ZIL. Note that this can not be changed
* while the ZIL is in use because consumers (ZPL/zvol) need to take
* this into account when deciding between WR_COPIED and WR_NEED_COPY
* (see zil_max_copied_data()).
*/
uint64_t zl_max_block_size;
/* Pointer for per dataset zil sums */
zil_sums_t *zl_sums;
};
typedef struct zil_bp_node {
dva_t zn_dva;
avl_node_t zn_node;
} zil_bp_node_t;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZIL_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index f4da80783e56..e1f4d5c04499 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -1,725 +1,724 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019-2020, Michael Niewöhner
*/
#ifndef _ZIO_H
#define _ZIO_H
#include <sys/zio_priority.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/fs/zfs.h>
#include <sys/zio_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Embedded checksum
*/
#define ZEC_MAGIC 0x210da7ab10c7a11ULL
typedef struct zio_eck {
uint64_t zec_magic; /* for validation, endianness */
zio_cksum_t zec_cksum; /* 256-bit checksum */
} zio_eck_t;
/*
* Gang block headers are self-checksumming and contain an array
* of block pointers.
*/
#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t) - \
(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
sizeof (uint64_t))
typedef struct zio_gbh {
blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
uint64_t zg_filler[SPA_GBH_FILLER];
zio_eck_t zg_tail;
} zio_gbh_phys_t;
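/*
 * By construction, zg_filler pads the header to exactly one
 * SPA_MINBLOCKSIZE block; a compile-time check (sketch only) would be:
 *
 *	CTASSERT(sizeof (zio_gbh_phys_t) == SPA_GANGBLOCKSIZE);
 */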
enum zio_checksum {
ZIO_CHECKSUM_INHERIT = 0,
ZIO_CHECKSUM_ON,
ZIO_CHECKSUM_OFF,
ZIO_CHECKSUM_LABEL,
ZIO_CHECKSUM_GANG_HEADER,
ZIO_CHECKSUM_ZILOG,
ZIO_CHECKSUM_FLETCHER_2,
ZIO_CHECKSUM_FLETCHER_4,
ZIO_CHECKSUM_SHA256,
ZIO_CHECKSUM_ZILOG2,
ZIO_CHECKSUM_NOPARITY,
ZIO_CHECKSUM_SHA512,
ZIO_CHECKSUM_SKEIN,
ZIO_CHECKSUM_EDONR,
ZIO_CHECKSUM_BLAKE3,
ZIO_CHECKSUM_FUNCTIONS
};
/*
* The number of "legacy" compression functions which can be set on individual
* objects.
*/
#define ZIO_CHECKSUM_LEGACY_FUNCTIONS ZIO_CHECKSUM_ZILOG2
#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4
#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON
#define ZIO_CHECKSUM_MASK 0xffULL
#define ZIO_CHECKSUM_VERIFY (1U << 8)
#define ZIO_DEDUPCHECKSUM ZIO_CHECKSUM_SHA256
/* macros defining encryption lengths */
#define ZIO_OBJSET_MAC_LEN 32
#define ZIO_DATA_IV_LEN 12
#define ZIO_DATA_SALT_LEN 8
#define ZIO_DATA_MAC_LEN 16
/*
* The number of "legacy" compression functions which can be set on individual
* objects.
*/
#define ZIO_COMPRESS_LEGACY_FUNCTIONS ZIO_COMPRESS_LZ4
/*
* The meaning of "compress = on" selected by the compression features enabled
* on a given pool.
*/
#define ZIO_COMPRESS_LEGACY_ON_VALUE ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_LZ4_ON_VALUE ZIO_COMPRESS_LZ4
#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_ON
#define BOOTFS_COMPRESS_VALID(compress) \
((compress) == ZIO_COMPRESS_LZJB || \
(compress) == ZIO_COMPRESS_LZ4 || \
(compress) == ZIO_COMPRESS_GZIP_1 || \
(compress) == ZIO_COMPRESS_GZIP_2 || \
(compress) == ZIO_COMPRESS_GZIP_3 || \
(compress) == ZIO_COMPRESS_GZIP_4 || \
(compress) == ZIO_COMPRESS_GZIP_5 || \
(compress) == ZIO_COMPRESS_GZIP_6 || \
(compress) == ZIO_COMPRESS_GZIP_7 || \
(compress) == ZIO_COMPRESS_GZIP_8 || \
(compress) == ZIO_COMPRESS_GZIP_9 || \
(compress) == ZIO_COMPRESS_ZLE || \
(compress) == ZIO_COMPRESS_ZSTD || \
(compress) == ZIO_COMPRESS_ON || \
(compress) == ZIO_COMPRESS_OFF)
#define ZIO_COMPRESS_ALGO(x) (x & SPA_COMPRESSMASK)
#define ZIO_COMPRESS_LEVEL(x) ((x & ~SPA_COMPRESSMASK) >> SPA_COMPRESSBITS)
#define ZIO_COMPRESS_RAW(type, level) (type | ((level) << SPA_COMPRESSBITS))
#define ZIO_COMPLEVEL_ZSTD(level) \
ZIO_COMPRESS_RAW(ZIO_COMPRESS_ZSTD, level)
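/*
 * A sketch of packing and unpacking a compression property with the
 * macros above (the level value is illustrative):
 *
 *	uint64_t prop = ZIO_COMPLEVEL_ZSTD(3);
 *	ASSERT3U(ZIO_COMPRESS_ALGO(prop), ==, ZIO_COMPRESS_ZSTD);
 *	ASSERT3U(ZIO_COMPRESS_LEVEL(prop), ==, 3);
 */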
#define ZIO_FAILURE_MODE_WAIT 0
#define ZIO_FAILURE_MODE_CONTINUE 1
#define ZIO_FAILURE_MODE_PANIC 2
typedef enum zio_suspend_reason {
ZIO_SUSPEND_NONE = 0,
ZIO_SUSPEND_IOERR,
ZIO_SUSPEND_MMP,
} zio_suspend_reason_t;
/*
* This was originally an enum type. However, those are 32-bit and there is no
* way to make a 64-bit enum type. Since we ran out of bits for flags, we were
* forced to upgrade it to a uint64_t.
*/
typedef uint64_t zio_flag_t;
/*
* Flags inherited by gang, ddt, and vdev children,
* and that must be equal for two zios to aggregate
*/
#define ZIO_FLAG_DONT_AGGREGATE (1ULL << 0)
#define ZIO_FLAG_IO_REPAIR (1ULL << 1)
#define ZIO_FLAG_SELF_HEAL (1ULL << 2)
#define ZIO_FLAG_RESILVER (1ULL << 3)
#define ZIO_FLAG_SCRUB (1ULL << 4)
#define ZIO_FLAG_SCAN_THREAD (1ULL << 5)
#define ZIO_FLAG_PHYSICAL (1ULL << 6)
#define ZIO_FLAG_AGG_INHERIT (ZIO_FLAG_CANFAIL - 1)
/*
* Flags inherited by ddt, gang, and vdev children.
*/
#define ZIO_FLAG_CANFAIL (1ULL << 7) /* must be first for INHERIT */
#define ZIO_FLAG_SPECULATIVE (1ULL << 8)
#define ZIO_FLAG_CONFIG_WRITER (1ULL << 9)
#define ZIO_FLAG_DONT_RETRY (1ULL << 10)
#define ZIO_FLAG_NODATA (1ULL << 12)
#define ZIO_FLAG_INDUCE_DAMAGE (1ULL << 13)
#define ZIO_FLAG_IO_ALLOCATING (1ULL << 14)
#define ZIO_FLAG_DDT_INHERIT (ZIO_FLAG_IO_RETRY - 1)
#define ZIO_FLAG_GANG_INHERIT (ZIO_FLAG_IO_RETRY - 1)
/*
* Flags inherited by vdev children.
*/
#define ZIO_FLAG_IO_RETRY (1ULL << 15) /* must be first for INHERIT */
#define ZIO_FLAG_PROBE (1ULL << 16)
#define ZIO_FLAG_TRYHARD (1ULL << 17)
#define ZIO_FLAG_OPTIONAL (1ULL << 18)
#define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)
/*
* Flags not inherited by any children.
*/
#define ZIO_FLAG_DONT_QUEUE (1ULL << 19) /* must be first for INHERIT */
#define ZIO_FLAG_DONT_PROPAGATE (1ULL << 20)
#define ZIO_FLAG_IO_BYPASS (1ULL << 21)
#define ZIO_FLAG_IO_REWRITE (1ULL << 22)
#define ZIO_FLAG_RAW_COMPRESS (1ULL << 23)
#define ZIO_FLAG_RAW_ENCRYPT (1ULL << 24)
#define ZIO_FLAG_GANG_CHILD (1ULL << 25)
#define ZIO_FLAG_DDT_CHILD (1ULL << 26)
#define ZIO_FLAG_GODFATHER (1ULL << 27)
#define ZIO_FLAG_NOPWRITE (1ULL << 28)
#define ZIO_FLAG_REEXECUTED (1ULL << 29)
#define ZIO_FLAG_DELEGATED (1ULL << 30)
-#define ZIO_FLAG_FASTWRITE (1ULL << 31)
#define ZIO_FLAG_MUSTSUCCEED 0
#define ZIO_FLAG_RAW (ZIO_FLAG_RAW_COMPRESS | ZIO_FLAG_RAW_ENCRYPT)
#define ZIO_DDT_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) | \
ZIO_FLAG_DDT_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_GANG_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \
ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_VDEV_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_CANFAIL)
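/*
 * A sketch of how a parent's flags become a child's: the inheritable
 * subset is masked in and the mandatory child flags are OR'd on, e.g.
 *
 *	zio_flag_t child_flags = ZIO_VDEV_CHILD_FLAGS(pio);
 */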
#define ZIO_CHILD_BIT(x) (1U << (x))
#define ZIO_CHILD_BIT_IS_SET(val, x) ((val) & (1U << (x)))
enum zio_child {
ZIO_CHILD_VDEV = 0,
ZIO_CHILD_GANG,
ZIO_CHILD_DDT,
ZIO_CHILD_LOGICAL,
ZIO_CHILD_TYPES
};
#define ZIO_CHILD_VDEV_BIT ZIO_CHILD_BIT(ZIO_CHILD_VDEV)
#define ZIO_CHILD_GANG_BIT ZIO_CHILD_BIT(ZIO_CHILD_GANG)
#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
#define ZIO_CHILD_ALL_BITS \
(ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
enum zio_wait_type {
ZIO_WAIT_READY = 0,
ZIO_WAIT_DONE,
ZIO_WAIT_TYPES
};
typedef void zio_done_func_t(zio_t *zio);
extern int zio_exclude_metadata;
extern int zio_dva_throttle_enabled;
extern const char *const zio_type_name[ZIO_TYPES];
/*
* A bookmark is a four-tuple <objset, object, level, blkid> that uniquely
* identifies any block in the pool. By convention, the meta-objset (MOS)
* is objset 0, and the meta-dnode is object 0. This covers all blocks
* except root blocks and ZIL blocks, which are defined as follows:
*
* Root blocks (objset_phys_t) are object 0, level -1: <objset, 0, -1, 0>.
* ZIL blocks are bookmarked <objset, 0, -2, blkid == ZIL sequence number>.
* dmu_sync()ed ZIL data blocks are bookmarked <objset, object, -2, blkid>.
* dnode visit bookmarks are <objset, object id of dnode, -3, 0>.
*
* Note: this structure is called a bookmark because its original purpose
* was to remember where to resume a pool-wide traverse.
*
* Note: this structure is passed between userland and the kernel, and is
* stored on disk (by virtue of being incorporated into other on-disk
* structures, e.g. dsl_scan_phys_t).
*
* If the head_errlog feature is enabled a different on-disk format for error
* logs is used. This introduces the use of an error bookmark, a four-tuple
* <object, level, blkid, birth> that uniquely identifies any error block
* in the pool. The birth transaction group is used to track whether the block
* has been overwritten by newer data or added to a snapshot since its marking
* as an error.
*/
struct zbookmark_phys {
uint64_t zb_objset;
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
};
struct zbookmark_err_phys {
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
uint64_t zb_birth;
};
#define SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
(zb)->zb_objset = objset; \
(zb)->zb_object = object; \
(zb)->zb_level = level; \
(zb)->zb_blkid = blkid; \
}
#define ZB_DESTROYED_OBJSET (-1ULL)
#define ZB_ROOT_OBJECT (0ULL)
#define ZB_ROOT_LEVEL (-1LL)
#define ZB_ROOT_BLKID (0ULL)
#define ZB_ZIL_OBJECT (0ULL)
#define ZB_ZIL_LEVEL (-2LL)
#define ZB_DNODE_LEVEL (-3LL)
#define ZB_DNODE_BLKID (0ULL)
#define ZB_IS_ZERO(zb) \
((zb)->zb_objset == 0 && (zb)->zb_object == 0 && \
(zb)->zb_level == 0 && (zb)->zb_blkid == 0)
#define ZB_IS_ROOT(zb) \
((zb)->zb_object == ZB_ROOT_OBJECT && \
(zb)->zb_level == ZB_ROOT_LEVEL && \
(zb)->zb_blkid == ZB_ROOT_BLKID)
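/*
 * A sketch of filling in a ZIL-block bookmark under the conventions
 * above; "os" and "seq" are illustrative, and dmu_objset_id() is the
 * in-tree objset-id accessor:
 *
 *	zbookmark_phys_t zb;
 *	SET_BOOKMARK(&zb, dmu_objset_id(os), ZB_ZIL_OBJECT,
 *	    ZB_ZIL_LEVEL, seq);
 */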
typedef struct zio_prop {
enum zio_checksum zp_checksum;
enum zio_compress zp_compress;
uint8_t zp_complevel;
uint8_t zp_level;
uint8_t zp_copies;
dmu_object_type_t zp_type;
boolean_t zp_dedup;
boolean_t zp_dedup_verify;
boolean_t zp_nopwrite;
boolean_t zp_brtwrite;
boolean_t zp_encrypt;
boolean_t zp_byteorder;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
uint32_t zp_zpl_smallblk;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);
struct zio_bad_cksum; /* defined in zio_checksum.h */
struct dnode_phys;
struct abd;
struct zio_cksum_report {
struct zio_cksum_report *zcr_next;
nvlist_t *zcr_ereport;
nvlist_t *zcr_detector;
void *zcr_cbdata;
size_t zcr_cbinfo; /* passed to zcr_free() */
uint64_t zcr_sector;
uint64_t zcr_align;
uint64_t zcr_length;
zio_cksum_finish_f *zcr_finish;
zio_cksum_free_f *zcr_free;
/* internal use only */
struct zio_bad_cksum *zcr_ckinfo; /* information from failure */
};
typedef struct zio_vsd_ops {
zio_done_func_t *vsd_free;
} zio_vsd_ops_t;
typedef struct zio_gang_node {
zio_gbh_phys_t *gn_gbh;
struct zio_gang_node *gn_child[SPA_GBH_NBLKPTRS];
} zio_gang_node_t;
typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
zio_gang_node_t *gn, struct abd *data, uint64_t offset);
typedef void zio_transform_func_t(zio_t *zio, struct abd *data, uint64_t size);
typedef struct zio_transform {
struct abd *zt_orig_abd;
uint64_t zt_orig_size;
uint64_t zt_bufsize;
zio_transform_func_t *zt_transform;
struct zio_transform *zt_next;
} zio_transform_t;
typedef zio_t *zio_pipe_stage_t(zio_t *zio);
/*
* The io_reexecute flags are distinct from io_flags because the child must
* be able to propagate them to the parent. The normal io_flags are local
* to the zio, not protected by any lock, and not modifiable by children;
* the reexecute flags are protected by io_lock, modifiable by children,
* and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set.
*/
#define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02
/*
* The io_trim flags are used to specify the type of TRIM to perform. They
 * only apply to ZIO_TYPE_TRIM zios and are distinct from io_flags.
*/
enum trim_flag {
ZIO_TRIM_SECURE = 1U << 0,
};
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link {
zio_t *zl_parent;
zio_t *zl_child;
list_node_t zl_parent_node;
list_node_t zl_child_node;
} zio_link_t;
enum zio_qstate {
ZIO_QS_NONE = 0,
ZIO_QS_QUEUED,
ZIO_QS_ACTIVE,
};
struct zio {
/* Core information about this I/O */
zbookmark_phys_t io_bookmark;
zio_prop_t io_prop;
zio_type_t io_type;
enum zio_child io_child_type;
enum trim_flag io_trim_flags;
int io_cmd;
zio_priority_t io_priority;
uint8_t io_reexecute;
uint8_t io_state[ZIO_WAIT_TYPES];
uint64_t io_txg;
spa_t *io_spa;
blkptr_t *io_bp;
blkptr_t *io_bp_override;
blkptr_t io_bp_copy;
list_t io_parent_list;
list_t io_child_list;
zio_t *io_logical;
zio_transform_t *io_transform_stack;
/* Callback info */
zio_done_func_t *io_ready;
zio_done_func_t *io_children_ready;
zio_done_func_t *io_done;
void *io_private;
int64_t io_prev_space_delta; /* DMU private */
blkptr_t io_bp_orig;
/* io_lsize != io_orig_size iff this is a raw write */
uint64_t io_lsize;
/* Data represented by this I/O */
struct abd *io_abd;
struct abd *io_orig_abd;
uint64_t io_size;
uint64_t io_orig_size;
/* Stuff for the vdev stack */
vdev_t *io_vd;
void *io_vsd;
const zio_vsd_ops_t *io_vsd_ops;
metaslab_class_t *io_metaslab_class; /* dva throttle class */
enum zio_qstate io_queue_state; /* vdev queue state */
union {
list_node_t l;
avl_node_t a;
} io_queue_node ____cacheline_aligned; /* allocator and vdev queues */
avl_node_t io_offset_node; /* vdev offset queues */
uint64_t io_offset;
hrtime_t io_timestamp; /* submitted at */
hrtime_t io_queued_timestamp;
hrtime_t io_target_timestamp;
hrtime_t io_delta; /* vdev queue service delta */
hrtime_t io_delay; /* Device access time (disk or */
/* file). */
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
zio_flag_t io_flags;
enum zio_stage io_stage;
enum zio_stage io_pipeline;
zio_flag_t io_orig_flags;
enum zio_stage io_orig_stage;
enum zio_stage io_orig_pipeline;
enum zio_stage io_pipeline_trace;
int io_error;
int io_child_error[ZIO_CHILD_TYPES];
uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES];
uint64_t *io_stall;
zio_t *io_gang_leader;
zio_gang_node_t *io_gang_tree;
void *io_executor;
void *io_waiter;
void *io_bio;
kmutex_t io_lock;
kcondvar_t io_cv;
int io_allocator;
/* FMA state */
zio_cksum_report_t *io_cksum_report;
uint64_t io_ena;
/* Taskq dispatching state */
taskq_ent_t io_tqent;
};
enum blk_verify_flag {
BLK_VERIFY_ONLY,
BLK_VERIFY_LOG,
BLK_VERIFY_HALT
};
enum blk_config_flag {
BLK_CONFIG_HELD, // SCL_VDEV held for writer
BLK_CONFIG_NEEDED, // SCL_VDEV should be obtained for reader
BLK_CONFIG_SKIP, // skip checks which require SCL_VDEV
};
extern int zio_bookmark_compare(const void *, const void *);
extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern zio_t *zio_root(spa_t *spa,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern void zio_destroy(zio_t *zio);
extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
struct abd *data, uint64_t lsize, zio_done_func_t *done, void *priv,
zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb);
extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, const zbookmark_phys_t *zb);
extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, zio_done_func_t *done, void *priv,
zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb);
extern void zio_write_override(zio_t *zio, blkptr_t *bp, int copies,
boolean_t nopwrite, boolean_t brtwrite);
extern void zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp);
extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern zio_t *zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, enum trim_flag trim_flags);
extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, boolean_t labels);
extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, boolean_t labels);
extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, zio_flag_t flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
blkptr_t *new_bp, uint64_t size, boolean_t *slog);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
extern int zio_wait(zio_t *zio);
extern void zio_nowait(zio_t *zio);
extern void zio_execute(void *zio);
extern void zio_interrupt(void *zio);
extern void zio_delay_init(zio_t *zio);
extern void zio_delay_interrupt(zio_t *zio);
extern void zio_deadman(zio_t *zio, const char *tag);
extern zio_t *zio_walk_parents(zio_t *cio, zio_link_t **);
extern zio_t *zio_walk_children(zio_t *pio, zio_link_t **);
extern zio_t *zio_unique_parent(zio_t *cio);
extern void zio_add_child(zio_t *pio, zio_t *cio);
extern void zio_add_child_first(zio_t *pio, zio_t *cio);
extern void *zio_buf_alloc(size_t size);
extern void zio_buf_free(void *buf, size_t size);
extern void *zio_data_buf_alloc(size_t size);
extern void zio_data_buf_free(void *buf, size_t size);
extern void zio_push_transform(zio_t *zio, struct abd *abd, uint64_t size,
uint64_t bufsize, zio_transform_func_t *transform);
extern void zio_pop_transforms(zio_t *zio);
extern void zio_resubmit_stage_async(void *);
extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
uint64_t offset, struct abd *data, uint64_t size, int type,
zio_priority_t priority, zio_flag_t flags,
zio_done_func_t *done, void *priv);
extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset,
struct abd *data, uint64_t size, zio_type_t type, zio_priority_t priority,
zio_flag_t flags, zio_done_func_t *done, void *priv);
extern void zio_vdev_io_bypass(zio_t *zio);
extern void zio_vdev_io_reissue(zio_t *zio);
extern void zio_vdev_io_redone(zio_t *zio);
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
extern void zio_checksum_verified(zio_t *zio);
extern int zio_worst_error(int e1, int e2);
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,
enum zio_checksum parent);
extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
enum zio_checksum child, enum zio_checksum parent);
extern enum zio_compress zio_compress_select(spa_t *spa,
enum zio_compress child, enum zio_compress parent);
extern uint8_t zio_complevel_select(spa_t *spa, enum zio_compress compress,
uint8_t child, uint8_t parent);
extern void zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t);
extern int zio_resume(spa_t *spa);
extern void zio_resume_wait(spa_t *spa);
extern boolean_t zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
enum blk_config_flag blk_config, enum blk_verify_flag blk_verify);
/*
* Initial setup and teardown.
*/
extern void zio_init(void);
extern void zio_fini(void);
/*
* Fault injection
*/
struct zinject_record;
extern uint32_t zio_injection_enabled;
extern int zio_inject_fault(char *name, int flags, int *id,
struct zinject_record *record);
extern int zio_inject_list_next(int *id, char *name, size_t buflen,
struct zinject_record *record);
extern int zio_clear_fault(int id);
extern void zio_handle_panic_injection(spa_t *spa, const char *tag,
uint64_t type);
extern int zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
uint64_t type, int error);
extern int zio_handle_fault_injection(zio_t *zio, int error);
extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
extern int zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1,
int err2);
extern int zio_handle_label_injection(zio_t *zio, int error);
extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
/*
* Checksum ereport functions
*/
extern int zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, struct zio_bad_cksum *info);
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
/* If we have the good data in hand, this function can be used */
extern int zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, const abd_t *good_data, const abd_t *bad_data,
struct zio_bad_cksum *info);
void zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr);
extern void zfs_ereport_snapshot_post(const char *subclass, spa_t *spa,
const char *name);
/* Called from spa_sync(), but primarily an injection handler */
extern void spa_handle_ignored_writes(spa_t *spa);
/* zbookmark_phys functions */
boolean_t zbookmark_subtree_completed(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
boolean_t zbookmark_subtree_tbd(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
int zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2,
uint8_t ibs2, const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2);
#ifdef __cplusplus
}
#endif
#endif /* _ZIO_H */
diff --git a/sys/contrib/openzfs/include/sys/zio_checksum.h b/sys/contrib/openzfs/include/sys/zio_checksum.h
index 9fb79ab4a54b..37fd65b7cb3e 100644
--- a/sys/contrib/openzfs/include/sys/zio_checksum.h
+++ b/sys/contrib/openzfs/include/sys/zio_checksum.h
@@ -1,155 +1,153 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016 by Delphix. All rights reserved.
* Copyright (c) 2013 Saso Kiselkov, All rights reserved.
* Copyright (c) 2021 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#ifndef _SYS_ZIO_CHECKSUM_H
#define _SYS_ZIO_CHECKSUM_H extern __attribute__((visibility("default")))
#include <sys/zio.h>
#include <zfeature_common.h>
#include <zfs_fletcher.h>
#ifdef __cplusplus
extern "C" {
#endif
struct abd;
/*
* Signature for checksum functions.
*/
typedef void zio_checksum_t(struct abd *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp);
typedef void *zio_checksum_tmpl_init_t(const zio_cksum_salt_t *salt);
typedef void zio_checksum_tmpl_free_t(void *ctx_template);
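/*
 * A do-nothing checksum matching the zio_checksum_t signature above,
 * as a sketch only; the function name is hypothetical, and
 * ZIO_SET_CHECKSUM() is the in-tree helper that fills the four 64-bit
 * checksum words.
 */
static void
abd_checksum_noop(struct abd *abd, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	(void) abd, (void) size, (void) ctx_template;
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}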
typedef enum zio_checksum_flags {
/* Strong enough for metadata? */
ZCHECKSUM_FLAG_METADATA = (1 << 1),
/* ZIO embedded checksum */
ZCHECKSUM_FLAG_EMBEDDED = (1 << 2),
/* Strong enough for dedup (without verification)? */
ZCHECKSUM_FLAG_DEDUP = (1 << 3),
/* Uses salt value */
ZCHECKSUM_FLAG_SALTED = (1 << 4),
/* Strong enough for nopwrite? */
ZCHECKSUM_FLAG_NOPWRITE = (1 << 5)
} zio_checksum_flags_t;
typedef enum {
ZIO_CHECKSUM_NATIVE,
ZIO_CHECKSUM_BYTESWAP
} zio_byteorder_t;
typedef struct zio_abd_checksum_data {
zio_byteorder_t acd_byteorder;
fletcher_4_ctx_t *acd_ctx;
zio_cksum_t *acd_zcp;
void *acd_private;
} zio_abd_checksum_data_t;
typedef void zio_abd_checksum_init_t(zio_abd_checksum_data_t *);
typedef void zio_abd_checksum_fini_t(zio_abd_checksum_data_t *);
typedef int zio_abd_checksum_iter_t(void *, size_t, void *);
typedef const struct zio_abd_checksum_func {
zio_abd_checksum_init_t *acf_init;
zio_abd_checksum_fini_t *acf_fini;
zio_abd_checksum_iter_t *acf_iter;
} zio_abd_checksum_func_t;
/*
* Information about each checksum function.
*/
typedef const struct zio_checksum_info {
/* checksum function for each byteorder */
zio_checksum_t *ci_func[2];
zio_checksum_tmpl_init_t *ci_tmpl_init;
zio_checksum_tmpl_free_t *ci_tmpl_free;
zio_checksum_flags_t ci_flags;
const char *ci_name; /* descriptive name */
} zio_checksum_info_t;
typedef struct zio_bad_cksum {
- zio_cksum_t zbc_expected;
- zio_cksum_t zbc_actual;
const char *zbc_checksum_name;
uint8_t zbc_byteswapped;
uint8_t zbc_injected;
uint8_t zbc_has_cksum; /* expected/actual valid */
} zio_bad_cksum_t;
_SYS_ZIO_CHECKSUM_H zio_checksum_info_t
zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS];
/*
* Checksum routines.
*/
/* SHA2 */
extern zio_checksum_t abd_checksum_sha256;
extern zio_checksum_t abd_checksum_sha512_native;
extern zio_checksum_t abd_checksum_sha512_byteswap;
/* Skein */
extern zio_checksum_t abd_checksum_skein_native;
extern zio_checksum_t abd_checksum_skein_byteswap;
extern zio_checksum_tmpl_init_t abd_checksum_skein_tmpl_init;
extern zio_checksum_tmpl_free_t abd_checksum_skein_tmpl_free;
/* Edon-R */
extern zio_checksum_t abd_checksum_edonr_native;
extern zio_checksum_t abd_checksum_edonr_byteswap;
extern zio_checksum_tmpl_init_t abd_checksum_edonr_tmpl_init;
extern zio_checksum_tmpl_free_t abd_checksum_edonr_tmpl_free;
/* BLAKE3 */
extern zio_checksum_t abd_checksum_blake3_native;
extern zio_checksum_t abd_checksum_blake3_byteswap;
extern zio_checksum_tmpl_init_t abd_checksum_blake3_tmpl_init;
extern zio_checksum_tmpl_free_t abd_checksum_blake3_tmpl_free;
/* Fletcher 4 */
_SYS_ZIO_CHECKSUM_H zio_abd_checksum_func_t fletcher_4_abd_ops;
extern zio_checksum_t abd_fletcher_4_native;
extern zio_checksum_t abd_fletcher_4_byteswap;
extern int zio_checksum_equal(spa_t *, blkptr_t *, enum zio_checksum,
void *, uint64_t, uint64_t, zio_bad_cksum_t *);
extern void zio_checksum_compute(zio_t *, enum zio_checksum,
struct abd *, uint64_t);
extern int zio_checksum_error_impl(spa_t *, const blkptr_t *, enum zio_checksum,
struct abd *, uint64_t, uint64_t, zio_bad_cksum_t *);
extern int zio_checksum_error(zio_t *zio, zio_bad_cksum_t *out);
extern enum zio_checksum spa_dedup_checksum(spa_t *spa);
extern void zio_checksum_templates_free(spa_t *spa);
extern spa_feature_t zio_checksum_to_feature(enum zio_checksum cksum);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZIO_CHECKSUM_H */
diff --git a/sys/contrib/openzfs/lib/libzfs/Makefile.am b/sys/contrib/openzfs/lib/libzfs/Makefile.am
index cffe341220c2..5e74d908de3d 100644
--- a/sys/contrib/openzfs/lib/libzfs/Makefile.am
+++ b/sys/contrib/openzfs/lib/libzfs/Makefile.am
@@ -1,77 +1,77 @@
libzfs_la_CFLAGS = $(AM_CFLAGS) $(LIBRARY_CFLAGS)
libzfs_la_CFLAGS += $(LIBCRYPTO_CFLAGS) $(ZLIB_CFLAGS)
libzfs_la_CFLAGS += -fvisibility=hidden
lib_LTLIBRARIES += libzfs.la
CPPCHECKTARGETS += libzfs.la
dist_libzfs_la_SOURCES = \
%D%/libzfs_impl.h \
%D%/libzfs_changelist.c \
%D%/libzfs_config.c \
%D%/libzfs_crypto.c \
%D%/libzfs_dataset.c \
%D%/libzfs_diff.c \
%D%/libzfs_import.c \
%D%/libzfs_iter.c \
%D%/libzfs_mount.c \
%D%/libzfs_pool.c \
%D%/libzfs_sendrecv.c \
%D%/libzfs_status.c \
%D%/libzfs_util.c
if BUILD_FREEBSD
dist_libzfs_la_SOURCES += \
%D%/os/freebsd/libzfs_compat.c \
%D%/os/freebsd/libzfs_zmount.c
endif
if BUILD_LINUX
dist_libzfs_la_SOURCES += \
%D%/os/linux/libzfs_mount_os.c \
%D%/os/linux/libzfs_pool_os.c \
%D%/os/linux/libzfs_util_os.c
endif
nodist_libzfs_la_SOURCES = \
module/zcommon/cityhash.c \
module/zcommon/zfeature_common.c \
module/zcommon/zfs_comutil.c \
module/zcommon/zfs_deleg.c \
module/zcommon/zfs_fletcher.c \
module/zcommon/zfs_fletcher_aarch64_neon.c \
module/zcommon/zfs_fletcher_avx512.c \
module/zcommon/zfs_fletcher_intel.c \
module/zcommon/zfs_fletcher_sse.c \
module/zcommon/zfs_fletcher_superscalar.c \
module/zcommon/zfs_fletcher_superscalar4.c \
module/zcommon/zfs_namecheck.c \
module/zcommon/zfs_prop.c \
module/zcommon/zpool_prop.c \
module/zcommon/zprop_common.c
libzfs_la_LIBADD = \
libshare.la \
libzfs_core.la \
libnvpair.la \
libzutil.la \
libuutil.la
-libzfs_la_LIBADD += -lm $(LIBCRYPTO_LIBS) $(ZLIB_LIBS) $(LIBFETCH_LIBS) $(LTLIBINTL)
+libzfs_la_LIBADD += -lrt -lm $(LIBCRYPTO_LIBS) $(ZLIB_LIBS) $(LIBFETCH_LIBS) $(LTLIBINTL)
libzfs_la_LDFLAGS = -pthread
if !ASAN_ENABLED
libzfs_la_LDFLAGS += -Wl,-z,defs
endif
if BUILD_FREEBSD
libzfs_la_LIBADD += -lutil -lgeom
endif
libzfs_la_LDFLAGS += -version-info 5:0:1
pkgconfig_DATA += %D%/libzfs.pc
dist_noinst_DATA += %D%/libzfs.abi %D%/libzfs.suppr
dist_noinst_DATA += %D%/THIRDPARTYLICENSE.openssl %D%/THIRDPARTYLICENSE.openssl.descrip
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
index 5d1fe651c97e..b38ad88096b2 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
@@ -1,1462 +1,1462 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2022 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017 RackTop Systems.
* Copyright (c) 2018 Datto Inc.
* Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/*
* Routines to manage ZFS mounts. We separate all the nasty routines that have
* to deal with the OS. The following functions are the main entry points --
* they are used by mount and unmount and when changing a filesystem's
* mountpoint.
*
* zfs_is_mounted()
* zfs_mount()
* zfs_mount_at()
* zfs_unmount()
* zfs_unmountall()
*
* This file also contains the functions used to manage sharing filesystems:
*
* zfs_is_shared()
* zfs_share()
* zfs_unshare()
* zfs_unshareall()
* zfs_commit_shares()
*
* The following functions are available for pool consumers, and will
 * mount/unmount and share/unshare all datasets within the pool:
*
* zpool_enable_datasets()
* zpool_disable_datasets()
*/
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include "libzfs_impl.h"
#include <thread_pool.h>
#include <libshare.h>
#include <sys/systeminfo.h>
#define MAXISALEN 257 /* based on sysinfo(2) man page */
static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */
static void zfs_mount_task(void *);
static const proto_table_t proto_table[SA_PROTOCOL_COUNT] = {
[SA_PROTOCOL_NFS] =
{ZFS_PROP_SHARENFS, EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
[SA_PROTOCOL_SMB] =
{ZFS_PROP_SHARESMB, EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
};
static const enum sa_protocol share_all_proto[SA_PROTOCOL_COUNT + 1] = {
SA_PROTOCOL_NFS,
SA_PROTOCOL_SMB,
SA_NO_PROTOCOL
};
static boolean_t
dir_is_empty_stat(const char *dirname)
{
struct stat st;
/*
 * We only want to return false if the given path is a non-empty
* directory, all other errors are handled elsewhere.
*/
if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
return (B_TRUE);
}
/*
* An empty directory will still have two entries in it, one
* entry for each of "." and "..".
*/
if (st.st_size > 2) {
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
dir_is_empty_readdir(const char *dirname)
{
DIR *dirp;
struct dirent64 *dp;
int dirfd;
if ((dirfd = openat(AT_FDCWD, dirname,
O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
return (B_TRUE);
}
if ((dirp = fdopendir(dirfd)) == NULL) {
(void) close(dirfd);
return (B_TRUE);
}
while ((dp = readdir64(dirp)) != NULL) {
if (strcmp(dp->d_name, ".") == 0 ||
strcmp(dp->d_name, "..") == 0)
continue;
(void) closedir(dirp);
return (B_FALSE);
}
(void) closedir(dirp);
return (B_TRUE);
}
/*
* Returns true if the specified directory is empty. If we can't open the
* directory at all, return true so that the mount can fail with a more
* informative error message.
*/
static boolean_t
dir_is_empty(const char *dirname)
{
struct statfs64 st;
/*
 * If the statfs call fails or the filesystem is not a ZFS
* filesystem, fall back to the slow path which uses readdir.
*/
if ((statfs64(dirname, &st) != 0) ||
(st.f_type != ZFS_SUPER_MAGIC)) {
return (dir_is_empty_readdir(dirname));
}
/*
* At this point, we know the provided path is on a ZFS
* filesystem, so we can use stat instead of readdir to
* determine if the directory is empty or not. We try to avoid
* using readdir because that requires opening "dirname"; this
* open file descriptor can potentially end up in a child
* process if there's a concurrent fork, thus preventing the
* zfs_mount() from otherwise succeeding (the open file
* descriptor inherited by the child process will cause the
* parent's mount to fail with EBUSY). The performance
* implications of replacing the open, read, and close with a
* single stat is nice; but is not the main motivation for the
* added complexity.
*/
return (dir_is_empty_stat(dirname));
}
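/*
 * A sketch of the stat-based emptiness test used above; on ZFS a
 * directory's st_size reports its entry count, including "." and "..":
 *
 *	struct stat st;
 *	if (stat(dirname, &st) == 0 && S_ISDIR(st.st_mode))
 *		empty = (st.st_size <= 2);
 */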
/*
* Checks to see if the mount is active. If the filesystem is mounted, we fill
 * in 'where' with the current mountpoint, and return B_TRUE. Otherwise, we
 * return B_FALSE.
*/
boolean_t
is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
{
struct mnttab entry;
if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
return (B_FALSE);
if (where != NULL)
*where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
return (B_TRUE);
}
boolean_t
zfs_is_mounted(zfs_handle_t *zhp, char **where)
{
return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
}
/*
 * Checks any higher-order concerns about whether the given dataset is
 * mountable, returning B_FALSE otherwise. zfs_is_mountable_internal
 * specifically assumes that the caller has already verified the sanity of
 * mounting the dataset at its mountpoint to the extent the caller wants.
*/
static boolean_t
zfs_is_mountable_internal(zfs_handle_t *zhp)
{
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
getzoneid() == GLOBAL_ZONEID)
return (B_FALSE);
return (B_TRUE);
}
/*
* Returns true if the given dataset is mountable, false otherwise. Returns the
* mountpoint in 'buf'.
*/
static boolean_t
zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
zprop_source_t *source, int flags)
{
char sourceloc[MAXNAMELEN];
zprop_source_t sourcetype;
if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type,
B_FALSE))
return (B_FALSE);
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
&sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
return (B_FALSE);
if (!zfs_is_mountable_internal(zhp))
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE))
return (B_FALSE);
if (source)
*source = sourcetype;
return (B_TRUE);
}
/*
* The filesystem is mounted by invoking the system mount utility rather
* than by the system call mount(2). This ensures that the /etc/mtab
* file is correctly locked for the update. Performing our own locking
* and /etc/mtab update requires making an unsafe assumption about how
* the mount utility performs its locking. Unfortunately, this also means
* in the case of a mount failure we do not have the exact errno. We must
 * make do with the return value from the mount process.
*
* In the long term a shared library called libmount is under development
* which provides a common API to address the locking and errno issues.
* Once the standard mount utility has been updated to use this library
* we can add an autoconf check to conditionally use it.
*
* http://www.kernel.org/pub/linux/utils/util-linux/libmount-docs/index.html
*/
static int
zfs_add_option(zfs_handle_t *zhp, char *options, int len,
zfs_prop_t prop, const char *on, const char *off)
{
const char *source;
uint64_t value;
/* Skip adding duplicate default options */
if ((strstr(options, on) != NULL) || (strstr(options, off) != NULL))
return (0);
/*
* zfs_prop_get_int() is not used to ensure our mount options
* are not influenced by the current /proc/self/mounts contents.
*/
value = getprop_uint64(zhp, prop, &source);
(void) strlcat(options, ",", len);
(void) strlcat(options, value ? on : off, len);
return (0);
}
static int
zfs_add_options(zfs_handle_t *zhp, char *options, int len)
{
int error = 0;
error = zfs_add_option(zhp, options, len,
ZFS_PROP_ATIME, MNTOPT_ATIME, MNTOPT_NOATIME);
/*
* don't add relatime/strictatime when atime=off, otherwise strictatime
* will force atime=on
*/
if (strstr(options, MNTOPT_NOATIME) == NULL) {
error = zfs_add_option(zhp, options, len,
ZFS_PROP_RELATIME, MNTOPT_RELATIME, MNTOPT_STRICTATIME);
}
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_DEVICES, MNTOPT_DEVICES, MNTOPT_NODEVICES);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_EXEC, MNTOPT_EXEC, MNTOPT_NOEXEC);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_READONLY, MNTOPT_RO, MNTOPT_RW);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_SETUID, MNTOPT_SETUID, MNTOPT_NOSETUID);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_NBMAND, MNTOPT_NBMAND, MNTOPT_NONBMAND);
return (error);
}
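/*
 * A sketch of the calling pattern zfs_mount_at() uses below: seed the
 * option string, then let the property-derived options be appended.
 *
 *	char mntopts[MNT_LINE_MAX];
 *	(void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
 *	(void) zfs_add_options(zhp, mntopts, sizeof (mntopts));
 */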
int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
char mountpoint[ZFS_MAXPROPLEN];
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL,
flags))
return (0);
return (zfs_mount_at(zhp, options, flags, mountpoint));
}
/*
* Mount the given filesystem.
*/
int
zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
const char *mountpoint)
{
struct stat buf;
char mntopts[MNT_LINE_MAX];
char overlay[ZFS_MAXPROPLEN];
char prop_encroot[MAXNAMELEN];
boolean_t is_encroot;
zfs_handle_t *encroot_hp = zhp;
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t keystatus;
int remount = 0, rc;
if (options == NULL) {
(void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
} else {
(void) strlcpy(mntopts, options, sizeof (mntopts));
}
if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
remount = 1;
/* Potentially duplicates some checks if invoked by zfs_mount(). */
if (!zfs_is_mountable_internal(zhp))
return (0);
/*
* If the pool is imported read-only then all mounts must be read-only
*/
if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
(void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));
/*
* Append default mount options which apply to the mount point.
* This is done because under Linux (unlike Solaris) multiple mount
* points may reference a single super block. This means that just
* given a super block there is no back reference to update the per
* mount point options.
*/
rc = zfs_add_options(zhp, mntopts, sizeof (mntopts));
if (rc) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"default options unavailable"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
/*
* If the filesystem is encrypted the key must be loaded in order to
* mount. If the key isn't loaded, the MS_CRYPT flag decides whether
* or not we attempt to load the keys. Note: we must call
* zfs_refresh_properties() here since some callers of this function
* (most notably zpool_enable_datasets()) may implicitly load our key
* by loading the parent's key first.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
zfs_refresh_properties(zhp);
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If the key is unavailable and MS_CRYPT is set give the
* user a chance to enter the key. Otherwise just fail
* immediately.
*/
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
if (flags & MS_CRYPT) {
rc = zfs_crypto_get_encryption_root(zhp,
&is_encroot, prop_encroot);
if (rc) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for "
"'%s'."), zfs_get_name(zhp));
return (rc);
}
if (!is_encroot) {
encroot_hp = zfs_open(hdl, prop_encroot,
ZFS_TYPE_DATASET);
if (encroot_hp == NULL)
return (hdl->libzfs_error);
}
rc = zfs_crypto_load_key(encroot_hp,
B_FALSE, NULL);
if (!is_encroot)
zfs_close(encroot_hp);
if (rc)
return (rc);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key not loaded"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
}
}
/*
 * Append the zfsutil option so the mount helper allows the mount
*/
strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));
/* Create the directory if it doesn't already exist */
if (lstat(mountpoint, &buf) != 0) {
if (mkdirp(mountpoint, 0755) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create mountpoint: %s"),
strerror(errno));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
}
/*
* Overlay mounts are enabled by default but may be disabled
* via the 'overlay' property. The -O flag remains for compatibility.
*/
if (!(flags & MS_OVERLAY)) {
if (zfs_prop_get(zhp, ZFS_PROP_OVERLAY, overlay,
sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(overlay, "on") == 0) {
flags |= MS_OVERLAY;
}
}
}
/*
 * Determine if the mountpoint is empty. If it is not, refuse to perform
 * the mount. We don't perform this check if 'remount' is specified or if
 * the overlay option (-O) is given.
*/
if ((flags & MS_OVERLAY) == 0 && !remount &&
!dir_is_empty(mountpoint)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"directory is not empty"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
}
/* perform the mount */
rc = do_mount(zhp, mountpoint, mntopts, flags);
if (rc) {
/*
* Generic errors are nasty, but there are just way too many
* from mount(), and they're well-understood. We pick a few
* common ones to improve upon.
*/
if (rc == EBUSY) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mountpoint or dataset is busy"));
} else if (rc == EPERM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Insufficient privileges"));
} else if (rc == ENOTSUP) {
int spa_version;
VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can't mount a version %llu "
"file system on a version %d pool. Pool must be"
" upgraded to mount this file system."),
(u_longlong_t)zfs_prop_get_int(zhp,
ZFS_PROP_VERSION), spa_version);
} else {
zfs_error_aux(hdl, "%s", strerror(rc));
}
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
zhp->zfs_name));
}
/* remove the mounted entry before re-adding on remount */
if (remount)
libzfs_mnttab_remove(hdl, zhp->zfs_name);
/* add the mounted entry into our cache */
libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
return (0);
}
/*
* Unmount a single filesystem.
*/
static int
unmount_one(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
int error;
error = do_unmount(zhp, mountpoint, flags);
if (error != 0) {
int libzfs_err;
switch (error) {
case EBUSY:
libzfs_err = EZFS_BUSY;
break;
case EIO:
libzfs_err = EZFS_IO;
break;
case ENOENT:
libzfs_err = EZFS_NOENT;
break;
case ENOMEM:
libzfs_err = EZFS_NOMEM;
break;
case EPERM:
libzfs_err = EZFS_PERM;
break;
default:
libzfs_err = EZFS_UMOUNTFAILED;
}
if (zhp) {
return (zfs_error_fmt(zhp->zfs_hdl, libzfs_err,
dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
mountpoint));
} else {
return (-1);
}
}
return (0);
}
/*
* Unmount the given filesystem.
*/
int
zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
char *mntpt = NULL;
boolean_t encroot, unmounted = B_FALSE;
/* check to see if we need to unmount the filesystem */
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
/*
* mountpoint may have come from a call to
* getmnt/getmntany if it isn't NULL. If it is NULL,
 * we know it comes from libzfs_mnttab_find, whose
 * result can be freed later. We strdup it to be safe.
*/
if (mountpoint == NULL)
mntpt = zfs_strdup(hdl, entry.mnt_mountp);
else
mntpt = zfs_strdup(hdl, mountpoint);
/*
* Unshare and unmount the filesystem
*/
if (zfs_unshare(zhp, mntpt, share_all_proto) != 0) {
free(mntpt);
return (-1);
}
zfs_commit_shares(NULL);
if (unmount_one(zhp, mntpt, flags) != 0) {
free(mntpt);
(void) zfs_share(zhp, NULL);
zfs_commit_shares(NULL);
return (-1);
}
libzfs_mnttab_remove(hdl, zhp->zfs_name);
free(mntpt);
unmounted = B_TRUE;
}
/*
* If the MS_CRYPT flag is provided we must ensure we attempt to
* unload the dataset's key regardless of whether we did any work
* to unmount it. We only do this for encryption roots.
*/
if ((flags & MS_CRYPT) != 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
zfs_refresh_properties(zhp);
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0 &&
unmounted) {
(void) zfs_mount(zhp, NULL, 0);
return (-1);
}
if (encroot && zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_AVAILABLE &&
zfs_crypto_unload_key(zhp) != 0) {
(void) zfs_mount(zhp, NULL, 0);
return (-1);
}
}
zpool_disable_volume_os(zhp->zfs_name);
return (0);
}
/*
* Unmount this filesystem and any children inheriting the mountpoint property.
* To do this, just act like we're changing the mountpoint property, but don't
* remount the filesystems afterwards.
*/
int
zfs_unmountall(zfs_handle_t *zhp, int flags)
{
prop_changelist_t *clp;
int ret;
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_ITER_MOUNTED, flags);
if (clp == NULL)
return (-1);
ret = changelist_prefix(clp);
changelist_free(clp);
return (ret);
}
/*
* Unshare a filesystem by mountpoint.
*/
static int
unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
enum sa_protocol proto)
{
int err = sa_disable_share(mountpoint, proto);
if (err != SA_OK)
return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
name, sa_errorstr(err)));
return (0);
}
/*
* Share the given filesystem according to the options in the specified
* protocol-specific properties (sharenfs, sharesmb). We rely
* on "libshare" to do the dirty work for us.
*/
int
zfs_share(zfs_handle_t *zhp, const enum sa_protocol *proto)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char sourcestr[ZFS_MAXPROPLEN];
const enum sa_protocol *curr_proto;
zprop_source_t sourcetype;
int err = 0;
if (proto == NULL)
proto = share_all_proto;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL, 0))
return (0);
for (curr_proto = proto; *curr_proto != SA_NO_PROTOCOL; curr_proto++) {
/*
* Return success if there are no share options.
*/
if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
shareopts, sizeof (shareopts), &sourcetype, sourcestr,
ZFS_MAXPROPLEN, B_FALSE) != 0 ||
strcmp(shareopts, "off") == 0)
continue;
/*
* If the 'zoned' property is set, then zfs_is_mountable()
* will have already bailed out if we are in the global zone.
* But local zones cannot be NFS servers, so we ignore it for
* local zones as well.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
continue;
err = sa_enable_share(zfs_get_name(zhp), mountpoint, shareopts,
*curr_proto);
if (err != SA_OK) {
return (zfs_error_fmt(zhp->zfs_hdl,
proto_table[*curr_proto].p_share_err,
dgettext(TEXT_DOMAIN, "cannot share '%s: %s'"),
zfs_get_name(zhp), sa_errorstr(err)));
}
}
return (0);
}
/*
* Check to see if the filesystem is currently shared.
*/
boolean_t
zfs_is_shared(zfs_handle_t *zhp, char **where,
const enum sa_protocol *proto)
{
char *mountpoint;
if (proto == NULL)
proto = share_all_proto;
if (ZFS_IS_VOLUME(zhp))
return (B_FALSE);
if (!zfs_is_mounted(zhp, &mountpoint))
return (B_FALSE);
for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
if (sa_is_shared(mountpoint, *p)) {
if (where != NULL)
*where = mountpoint;
else
free(mountpoint);
return (B_TRUE);
}
free(mountpoint);
return (B_FALSE);
}
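/*
 * A minimal usage sketch (illustrative only; assumes the SA_PROTOCOL_NFS
 * and SA_NO_PROTOCOL constants from libshare). A caller can restrict the
 * check to a single protocol:
 *
 *	const enum sa_protocol nfs_only[] =
 *	    { SA_PROTOCOL_NFS, SA_NO_PROTOCOL };
 *	char *where = NULL;
 *	if (zfs_is_shared(zhp, &where, nfs_only)) {
 *		... dataset is shared over NFS at 'where', which
 *		the caller must free() ...
 *	}
 *
 * Passing NULL for proto checks every protocol in share_all_proto.
 */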
void
zfs_commit_shares(const enum sa_protocol *proto)
{
if (proto == NULL)
proto = share_all_proto;
for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
sa_commit_shares(*p);
}
void
zfs_truncate_shares(const enum sa_protocol *proto)
{
if (proto == NULL)
proto = share_all_proto;
for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
sa_truncate_shares(*p);
}
/*
* Unshare the given filesystem.
*/
int
zfs_unshare(zfs_handle_t *zhp, const char *mountpoint,
const enum sa_protocol *proto)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (proto == NULL)
proto = share_all_proto;
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
/* check to see if we need to unshare the filesystem */
const char *mntpt = mountpoint ?: entry.mnt_mountp;
for (const enum sa_protocol *curr_proto = proto;
*curr_proto != SA_NO_PROTOCOL; curr_proto++)
if (sa_is_shared(mntpt, *curr_proto) &&
unshare_one(hdl, zhp->zfs_name,
mntpt, *curr_proto) != 0)
return (-1);
}
return (0);
}
/*
* Same as zfs_unmountall(), but for NFS and SMB unshares.
*/
int
zfs_unshareall(zfs_handle_t *zhp, const enum sa_protocol *proto)
{
prop_changelist_t *clp;
int ret;
if (proto == NULL)
proto = share_all_proto;
clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
if (clp == NULL)
return (-1);
ret = changelist_unshare(clp, proto);
changelist_free(clp);
return (ret);
}
/*
* Remove the mountpoint associated with the current dataset, if necessary.
* We only remove the underlying directory if:
*
* - The mountpoint is not 'none' or 'legacy'
* - The mountpoint is non-empty
* - The mountpoint is the default or inherited
* - The 'zoned' property is set, or we're in a local zone
*
* Any other directories we leave alone.
*/
void
remove_mountpoint(zfs_handle_t *zhp)
{
char mountpoint[ZFS_MAXPROPLEN];
zprop_source_t source;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
&source, 0))
return;
if (source == ZPROP_SRC_DEFAULT ||
source == ZPROP_SRC_INHERITED) {
/*
* Try to remove the directory, silently ignoring any errors.
* The filesystem may have since been removed or moved around,
* and this error isn't really useful to the administrator in
* any way.
*/
(void) rmdir(mountpoint);
}
}
/*
* Add the given zfs handle to the cb_handles array, dynamically reallocating
* the array if it is out of space.
*/
void
libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
{
if (cbp->cb_alloc == cbp->cb_used) {
size_t newsz;
zfs_handle_t **newhandles;
newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
newhandles = zfs_realloc(zhp->zfs_hdl,
cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
newsz * sizeof (zfs_handle_t *));
cbp->cb_handles = newhandles;
cbp->cb_alloc = newsz;
}
cbp->cb_handles[cbp->cb_used++] = zhp;
}
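/*
 * The array grows geometrically (64 entries initially, doubling
 * thereafter), so repeated libzfs_add_handle() calls append in
 * amortized constant time.
 */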
/*
* Recursive helper function used during file system enumeration
*/
static int
zfs_iter_cb(zfs_handle_t *zhp, void *data)
{
get_all_cb_t *cbp = data;
if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_close(zhp);
return (0);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we cannot mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(cbp, zhp);
if (zfs_iter_filesystems_v2(zhp, 0, zfs_iter_cb, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
return (0);
}
/*
* Sort comparator that compares two mountpoint paths. We sort these paths so
* that subdirectories immediately follow their parents. This means that we
* effectively treat the '/' character as the lowest value non-nul char.
* Since filesystems from non-global zones can have the same mountpoint
* as other filesystems, the comparator sorts global zone filesystems to
* the top of the list. This means that the global zone will traverse the
* filesystem list in the correct order and can stop when it sees the
* first zoned filesystem. In a non-global zone, only the delegated
* filesystems are seen.
*
* An example sorted list using this comparator would look like:
*
* /foo
* /foo/bar
* /foo/bar/baz
* /foo/baz
* /foo.bar
* /foo (NGZ1)
* /foo (NGZ2)
*
* The mounting code depends on this ordering to deterministically iterate
* over filesystems in order to spawn parallel mount tasks.
*/
static int
mountpoint_cmp(const void *arga, const void *argb)
{
zfs_handle_t *const *zap = arga;
zfs_handle_t *za = *zap;
zfs_handle_t *const *zbp = argb;
zfs_handle_t *zb = *zbp;
char mounta[MAXPATHLEN];
char mountb[MAXPATHLEN];
const char *a = mounta;
const char *b = mountb;
boolean_t gota, gotb;
uint64_t zoneda, zonedb;
zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
if (zoneda && !zonedb)
return (1);
if (!zoneda && zonedb)
return (-1);
gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
if (gota) {
verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
}
gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
if (gotb) {
verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
}
if (gota && gotb) {
while (*a != '\0' && (*a == *b)) {
a++;
b++;
}
if (*a == *b)
return (0);
if (*a == '\0')
return (-1);
if (*b == '\0')
return (1);
if (*a == '/')
return (-1);
if (*b == '/')
return (1);
return (*a < *b ? -1 : *a > *b);
}
if (gota)
return (-1);
if (gotb)
return (1);
/*
* If neither filesystem has a mountpoint, revert to sorting by
* dataset name.
*/
return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
}
/*
* Return true if path2 is a child of path1, path2 equals path1, or
* path1 is "/" (every path is a child of "/").
*/
static boolean_t
libzfs_path_contains(const char *path1, const char *path2)
{
return (strcmp(path1, path2) == 0 || strcmp(path1, "/") == 0 ||
(strstr(path2, path1) == path2 && path2[strlen(path1)] == '/'));
}
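/*
 * For example:
 *	libzfs_path_contains("/a", "/a")	-> B_TRUE  (equal)
 *	libzfs_path_contains("/a", "/a/b")	-> B_TRUE  (child)
 *	libzfs_path_contains("/", "/a/b")	-> B_TRUE  ("/" contains all)
 *	libzfs_path_contains("/a", "/ab")	-> B_FALSE (prefix, not child)
 */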
/*
* Given a mountpoint specified by idx in the handles array, find the first
* non-descendent of that mountpoint and return its index. Descendant paths
* start with the parent's path. This function relies on the ordering
* enforced by mountpoint_cmp().
*/
static int
non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
{
char parent[ZFS_MAXPROPLEN];
char child[ZFS_MAXPROPLEN];
int i;
verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
for (i = idx + 1; i < num_handles; i++) {
verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
if (!libzfs_path_contains(parent, child))
break;
}
return (i);
}
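/*
 * For example, with handles sorted to mountpoints
 * { "/a", "/a/b", "/a/c", "/d" }, non_descendant_idx(handles, 4, 0)
 * returns 3 (the index of "/d", the first non-descendant of "/a"),
 * and non_descendant_idx(handles, 4, 3) returns 4 (one past the end).
 */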
typedef struct mnt_param {
libzfs_handle_t *mnt_hdl;
tpool_t *mnt_tp;
zfs_handle_t **mnt_zhps; /* filesystems to mount */
size_t mnt_num_handles;
int mnt_idx; /* Index of selected entry to mount */
zfs_iter_f mnt_func;
void *mnt_data;
} mnt_param_t;
/*
* Allocate and populate the parameter struct for mount function, and
* schedule mounting of the entry selected by idx.
*/
static void
zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
size_t num_handles, int idx, zfs_iter_f func, void *data, tpool_t *tp)
{
mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
mnt_param->mnt_hdl = hdl;
mnt_param->mnt_tp = tp;
mnt_param->mnt_zhps = handles;
mnt_param->mnt_num_handles = num_handles;
mnt_param->mnt_idx = idx;
mnt_param->mnt_func = func;
mnt_param->mnt_data = data;
(void) tpool_dispatch(tp, zfs_mount_task, (void *)mnt_param);
}
/*
* This is the structure used to keep state of mounting or sharing operations
* during a call to zpool_enable_datasets().
*/
typedef struct mount_state {
/*
* ms_mntstatus is set to -1 if any mount fails. While multiple threads
* could update this variable concurrently, no synchronization is
* needed as it's only ever set to -1.
*/
int ms_mntstatus;
int ms_mntflags;
const char *ms_mntopts;
} mount_state_t;
static int
zfs_mount_one(zfs_handle_t *zhp, void *arg)
{
mount_state_t *ms = arg;
int ret = 0;
/*
* don't attempt to mount encrypted datasets with
* unloaded keys
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE)
return (0);
if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
ret = ms->ms_mntstatus = -1;
return (ret);
}
static int
zfs_share_one(zfs_handle_t *zhp, void *arg)
{
mount_state_t *ms = arg;
int ret = 0;
if (zfs_share(zhp, NULL) != 0)
ret = ms->ms_mntstatus = -1;
return (ret);
}
/*
* Thread pool function to mount one file system. On completion, it finds and
* schedules its children to be mounted. This depends on the sorting done in
* zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
* each descending from the previous) will have no parallelism since we always
* have to wait for the parent to finish mounting before we can schedule
* its children.
*/
static void
zfs_mount_task(void *arg)
{
mnt_param_t *mp = arg;
int idx = mp->mnt_idx;
zfs_handle_t **handles = mp->mnt_zhps;
size_t num_handles = mp->mnt_num_handles;
char mountpoint[ZFS_MAXPROPLEN];
verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
goto out;
/*
* We dispatch tasks to mount filesystems with mountpoints underneath
* this one. We do this by dispatching the next filesystem with a
* descendant mountpoint of the one we just mounted, then skip all of
* its descendants, dispatch the next descendant mountpoint, and so on.
* The non_descendant_idx() function skips over filesystems that are
* descendants of the filesystem we just dispatched.
*/
for (int i = idx + 1; i < num_handles;
i = non_descendant_idx(handles, num_handles, i)) {
char child[ZFS_MAXPROPLEN];
verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
if (!libzfs_path_contains(mountpoint, child))
break; /* not a descendant, return */
zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
mp->mnt_func, mp->mnt_data, mp->mnt_tp);
}
out:
free(mp);
}
/*
* Issue the func callback for each ZFS handle contained in the handles
* array. This function is used to mount all datasets, and so this function
* guarantees that filesystems for parent mountpoints are called before their
* children. As such, before issuing any callbacks, we first sort the array
* of handles by mountpoint.
*
* Callbacks are issued in one of two ways:
*
* 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
* environment variable is set, then we issue callbacks sequentially.
*
* 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
* environment variable is not set, then we use a tpool to dispatch threads
* to mount filesystems in parallel. This function dispatches tasks to mount
* the filesystems at the top-level mountpoints, and these tasks in turn
* are responsible for recursively mounting filesystems in their children
* mountpoints.
*/
void
zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
{
zoneid_t zoneid = getzoneid();
/*
* The ZFS_SERIAL_MOUNT environment variable is an undocumented
* variable that can be used as a convenience to do a/b comparison
* of serial vs. parallel mounting.
*/
boolean_t serial_mount = !parallel ||
(getenv("ZFS_SERIAL_MOUNT") != NULL);
/*
* Sort the datasets by mountpoint. See mountpoint_cmp for details
* of how these are sorted.
*/
qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
if (serial_mount) {
for (int i = 0; i < num_handles; i++) {
func(handles[i], data);
}
return;
}
/*
* Issue the callback function for each dataset using a parallel
* algorithm that uses a thread pool to manage threads.
*/
tpool_t *tp = tpool_create(1, mount_tp_nthr, 0, NULL);
/*
* There may be multiple "top level" mountpoints outside of the pool's
* root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
* these.
*/
for (int i = 0; i < num_handles;
i = non_descendant_idx(handles, num_handles, i)) {
/*
* Since the mountpoints have been sorted so that the zoned
* filesystems are at the end, a zoned filesystem seen from
* the global zone means that we're done.
*/
if (zoneid == GLOBAL_ZONEID &&
zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
break;
zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
tp);
}
tpool_wait(tp); /* wait for all scheduled mounts to complete */
tpool_destroy(tp);
}
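/*
 * For an a/b comparison of the two strategies, setting the environment
 * variable forces the sequential path regardless of the parallel
 * argument, e.g. (shell sketch):
 *
 *	ZFS_SERIAL_MOUNT=1 zpool import tank
 */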
/*
* Mount and share all datasets within the given pool. This assumes that no
* datasets within the pool are currently mounted.
*/
int
zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
{
get_all_cb_t cb = { 0 };
mount_state_t ms = { 0 };
zfs_handle_t *zfsp;
int ret = 0;
if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
ZFS_TYPE_DATASET)) == NULL)
goto out;
/*
* Gather all non-snapshot datasets within the pool. Start by adding
* the root filesystem for this pool to the list, and then iterate
* over all child filesystems.
*/
libzfs_add_handle(&cb, zfsp);
if (zfs_iter_filesystems_v2(zfsp, 0, zfs_iter_cb, &cb) != 0)
goto out;
/*
* Mount all filesystems
*/
ms.ms_mntopts = mntopts;
ms.ms_mntflags = flags;
zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
zfs_mount_one, &ms, B_TRUE);
if (ms.ms_mntstatus != 0)
- ret = ms.ms_mntstatus;
+ ret = EZFS_MOUNTFAILED;
/*
* Share all filesystems that need to be shared. This needs to be
* a separate pass because libshare is not mt-safe, and so we need
* to share serially.
*/
ms.ms_mntstatus = 0;
zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
zfs_share_one, &ms, B_FALSE);
if (ms.ms_mntstatus != 0)
- ret = ms.ms_mntstatus;
+ ret = EZFS_SHAREFAILED;
else
zfs_commit_shares(NULL);
out:
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
return (ret);
}
struct sets_s {
char *mountpoint;
zfs_handle_t *dataset;
};
static int
mountpoint_compare(const void *a, const void *b)
{
const struct sets_s *mounta = (const struct sets_s *)a;
const struct sets_s *mountb = (const struct sets_s *)b;
return (strcmp(mountb->mountpoint, mounta->mountpoint));
}
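/*
 * Note that the arguments to strcmp() are reversed, yielding a
 * descending sort: "/a/b" sorts before "/a", so child filesystems are
 * unmounted before their parents in zpool_disable_datasets() below.
 */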
/*
* Unshare and unmount all datasets within the given pool. We don't want to
* rely on traversing the DSL to discover the filesystems within the pool,
* because this may be expensive (if not all of them are mounted), and can fail
* arbitrarily (on I/O error, for example). Instead, we walk /proc/self/mounts
* and gather all the filesystems that are currently mounted.
*/
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
int used, alloc;
FILE *mnttab;
struct mnttab entry;
size_t namelen;
struct sets_s *sets = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
int i;
int ret = -1;
int flags = (force ? MS_FORCE : 0);
namelen = strlen(zhp->zpool_name);
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
used = alloc = 0;
while (getmntent(mnttab, &entry) == 0) {
/*
* Ignore non-ZFS entries.
*/
if (entry.mnt_fstype == NULL ||
strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/*
* Ignore filesystems not within this pool.
*/
if (entry.mnt_mountp == NULL ||
strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
(entry.mnt_special[namelen] != '/' &&
entry.mnt_special[namelen] != '\0'))
continue;
/*
* At this point we've found a filesystem within our pool. Add
* it to our growing list.
*/
if (used == alloc) {
if (alloc == 0) {
sets = zfs_alloc(hdl,
8 * sizeof (struct sets_s));
alloc = 8;
} else {
sets = zfs_realloc(hdl, sets,
alloc * sizeof (struct sets_s),
alloc * 2 * sizeof (struct sets_s));
alloc *= 2;
}
}
sets[used].mountpoint = zfs_strdup(hdl, entry.mnt_mountp);
/*
* This is allowed to fail, in case there is some I/O error. It
* is only used to determine if we need to remove the underlying
* mountpoint, so failure is not fatal.
*/
sets[used].dataset = make_dataset_handle(hdl,
entry.mnt_special);
used++;
}
/*
* At this point, we have the entire list of filesystems, so sort it by
* mountpoint.
*/
if (used != 0)
qsort(sets, used, sizeof (struct sets_s), mountpoint_compare);
/*
* Walk through and first unshare everything.
*/
for (i = 0; i < used; i++) {
for (enum sa_protocol p = 0; p < SA_PROTOCOL_COUNT; ++p) {
if (sa_is_shared(sets[i].mountpoint, p) &&
unshare_one(hdl, sets[i].mountpoint,
sets[i].mountpoint, p) != 0)
goto out;
}
}
zfs_commit_shares(NULL);
/*
* Now unmount everything, removing the underlying directories as
* appropriate.
*/
for (i = 0; i < used; i++) {
if (unmount_one(sets[i].dataset, sets[i].mountpoint,
flags) != 0)
goto out;
}
for (i = 0; i < used; i++) {
if (sets[i].dataset)
remove_mountpoint(sets[i].dataset);
}
zpool_disable_datasets_os(zhp, force);
ret = 0;
out:
(void) fclose(mnttab);
for (i = 0; i < used; i++) {
if (sets[i].dataset)
zfs_close(sets[i].dataset);
free(sets[i].mountpoint);
}
free(sets);
return (ret);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
index d4af31c50cf8..85564edfd862 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
@@ -1,5391 +1,5397 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, Klara Inc.
*/
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
unsigned int create:1; /* Validate property on creation */
unsigned int import:1; /* Validate property on import */
unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;
/*
* ====================================================================
* zpool property functions
* ====================================================================
*/
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
if (errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zcmd_free_nvlists(&zc);
return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
old_props = zhp->zpool_props;
if (zpool_get_all_props(zhp) != 0)
return (-1);
nvlist_free(old_props);
return (0);
}
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t *src)
{
nvlist_t *nv, *nvl;
const char *value;
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
if ((value = zpool_prop_default_string(prop)) == NULL)
value = "-";
}
if (src)
*src = source;
return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t value;
zprop_source_t source;
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
/*
* zpool_get_all_props() has most likely failed because
* the pool is faulted, but if all we need is the top level
* vdev's guid then get it from the zhp config nvlist.
*/
if ((prop == ZPOOL_PROP_GUID) &&
(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
== 0)) {
return (value);
}
return (zpool_prop_default_numeric(prop));
}
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
}
if (src)
*src = source;
return (value);
}
/*
* Map VDEV STATE to printed strings.
*/
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return (gettext("OFFLINE"));
case VDEV_STATE_REMOVED:
return (gettext("REMOVED"));
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return (gettext("FAULTED"));
else if (aux == VDEV_AUX_SPLIT_POOL)
return (gettext("SPLIT"));
else
return (gettext("UNAVAIL"));
case VDEV_STATE_FAULTED:
return (gettext("FAULTED"));
case VDEV_STATE_DEGRADED:
return (gettext("DEGRADED"));
case VDEV_STATE_HEALTHY:
return (gettext("ONLINE"));
default:
break;
}
return (gettext("UNKNOWN"));
}
/*
* Map POOL STATE to printed strings.
*/
const char *
zpool_pool_state_to_name(pool_state_t state)
{
switch (state) {
default:
break;
case POOL_STATE_ACTIVE:
return (gettext("ACTIVE"));
case POOL_STATE_EXPORTED:
return (gettext("EXPORTED"));
case POOL_STATE_DESTROYED:
return (gettext("DESTROYED"));
case POOL_STATE_SPARE:
return (gettext("SPARE"));
case POOL_STATE_L2CACHE:
return (gettext("L2CACHE"));
case POOL_STATE_UNINITIALIZED:
return (gettext("UNINITIALIZED"));
case POOL_STATE_UNAVAIL:
return (gettext("UNAVAIL"));
case POOL_STATE_POTENTIALLY_ACTIVE:
return (gettext("POTENTIALLY_ACTIVE"));
}
return (gettext("UNKNOWN"));
}
/*
* Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
* "SUSPENDED", etc).
*/
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
zpool_errata_t errata;
zpool_status_t status;
const char *str;
status = zpool_get_status(zhp, NULL, &errata);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
str = gettext("FAULTED");
} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
status == ZPOOL_STATUS_IO_FAILURE_MMP) {
str = gettext("SUSPENDED");
} else {
nvlist_t *nvroot = fnvlist_lookup_nvlist(
zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
uint_t vsc;
vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
}
return (str);
}
/*
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
size_t len, zprop_source_t *srctype, boolean_t literal)
{
uint64_t intval;
const char *strval;
zprop_source_t src = ZPROP_SRC_NONE;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
switch (prop) {
case ZPOOL_PROP_NAME:
(void) strlcpy(buf, zpool_get_name(zhp), len);
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_GUID:
intval = zpool_get_prop_int(zhp, prop, &src);
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
break;
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
case ZPOOL_PROP_COMPATIBILITY:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
(void) strlcpy(buf,
zpool_get_prop_string(zhp, prop, &src),
len);
break;
}
zfs_fallthrough;
default:
(void) strlcpy(buf, "-", len);
break;
}
if (srctype != NULL)
*srctype = src;
return (0);
}
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
return (-1);
switch (zpool_prop_get_type(prop)) {
case PROP_TYPE_STRING:
(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
len);
break;
case PROP_TYPE_NUMBER:
intval = zpool_get_prop_int(zhp, prop, &src);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_LEAKED:
case ZPOOL_PROP_ASHIFT:
case ZPOOL_PROP_MAXBLOCKSIZE:
case ZPOOL_PROP_MAXDNODESIZE:
case ZPOOL_PROP_BCLONESAVED:
case ZPOOL_PROP_BCLONEUSED:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
else
(void) zfs_nicenum(intval, buf, len);
break;
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicebytes(intval, buf, len);
}
break;
case ZPOOL_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_BCLONERATIO:
case ZPOOL_PROP_DEDUPRATIO:
if (literal)
(void) snprintf(buf, len, "%llu.%02llu",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
else
(void) snprintf(buf, len, "%llu.%02llux",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
break;
}
zfs_fallthrough;
default:
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
intval = zpool_get_prop_int(zhp, prop, &src);
if (zpool_prop_index_to_string(prop, intval, &strval)
!= 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
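/*
 * For example, a capacity of 42 prints as "42" when 'literal' is set
 * and "42%" otherwise; a dedupratio of 150 prints as "1.50" when
 * literal and "1.50x" otherwise.
 */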
/*
* Get a zpool property value for 'propname' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len, zprop_source_t *srctype)
{
nvlist_t *nv, *nvl;
uint64_t ival;
const char *value;
zprop_source_t source = ZPROP_SRC_LOCAL;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, propname, &nv) == 0) {
if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
source = ival;
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
} else {
source = ZPROP_SRC_DEFAULT;
value = "-";
}
if (srctype)
*srctype = source;
(void) strlcpy(buf, value, len);
return (0);
}
/*
* Check that the bootfs name is within the pool it is being set on.
* Assumes bootfs is a valid dataset name.
*/
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
int len = strlen(pool);
if (bootfs[0] == '\0')
return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
if (strncmp(pool, bootfs, len) == 0 &&
(bootfs[len] == '/' || bootfs[len] == '\0'))
return (B_TRUE);
return (B_FALSE);
}
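/*
 * For example, with pool "rpool":
 *	""			-> B_TRUE  (unsetting bootfs is always valid)
 *	"rpool"			-> B_TRUE
 *	"rpool/ROOT/be"		-> B_TRUE
 *	"rpoolx/ROOT"		-> B_FALSE (different pool)
 */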
/*
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
* specified as strings.
*/
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
nvpair_t *elem;
nvlist_t *retprops;
zpool_prop_t prop;
const char *strval;
uint64_t intval;
const char *slash, *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
char report[1024];
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
if (flags.vdevprop && zpool_prop_vdev(propname)) {
vdev_prop_t vprop = vdev_name_to_prop(propname);
if (vdev_prop_readonly(vprop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
retprops, &strval, &intval, errbuf) != 0)
goto error;
continue;
} else if (flags.vdevprop && vdev_prop_user(propname)) {
if (nvlist_add_nvpair(retprops, elem) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
} else if (flags.vdevprop) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property: '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
prop = zpool_name_to_prop(propname);
if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
int err;
char *fname = strchr(propname, '@') + 1;
err = zfeature_lookup_name(fname, NULL);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"feature '%s' unsupported by kernel"),
fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'enabled' or 'disabled'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!flags.create &&
strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'disabled' at creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
} else if (prop == ZPOOL_PROP_INVAL &&
zfs_prop_user(propname)) {
/*
* This is a user property: make sure it's a
* string, and that it's less than ZAP_MAXNAMELEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strlen(strval) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value '%s' is too long"),
strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_string(retprops, propname,
strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Make sure this property is valid and applies to this type.
*/
if (prop == ZPOOL_PROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zpool_prop_readonly(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (!flags.create && zpool_prop_setonce(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform additional checking for specific properties.
*/
switch (prop) {
case ZPOOL_PROP_VERSION:
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid."),
propname, (unsigned long long)intval);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
break;
case ZPOOL_PROP_ASHIFT:
if (intval != 0 &&
(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid, "
"only values between %" PRId32 " and %"
PRId32 " are allowed."),
propname, (unsigned long long)intval,
ASHIFT_MIN, ASHIFT_MAX);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_BOOTFS:
if (flags.create || flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' cannot be set at creation "
"or import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (version < SPA_VERSION_BOOTFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support "
"'%s' property"), propname);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
/*
* The bootfs property value has to be a dataset name, and
* the dataset has to be in the pool the property is set on.
*/
if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto error;
}
if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"could not open pool '%s'"), poolname);
(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
goto error;
}
zpool_close(zhp);
break;
case ZPOOL_PROP_ALTROOT:
if (!flags.create && !flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set during pool "
"creation or import"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad alternate root '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
break;
case ZPOOL_PROP_CACHEFILE:
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be empty, an "
"absolute path, or 'none'"), propname);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
slash = strrchr(strval, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*(char *)slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
*(char *)slash = '/';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*(char *)slash = '/';
break;
case ZPOOL_PROP_COMPATIBILITY:
switch (zpool_load_compat(strval, NULL, report, 1024)) {
case ZPOOL_COMPATIBILITY_OK:
case ZPOOL_COMPATIBILITY_WARNTOKEN:
break;
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
case ZPOOL_COMPATIBILITY_NOFILES:
zfs_error_aux(hdl, "%s", report);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_COMMENT:
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"comment may only have printable "
"characters"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"comment must not exceed %d characters"),
ZPROP_MAX_COMMENT);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_READONLY:
if (!flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_MULTIHOST:
if (get_system_hostid() == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"requires a non-zero system hostid"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_DEDUPDITTO:
printf("Note: property '%s' no longer has "
"any effect\n", propname);
break;
default:
break;
}
}
return (retprops);
error:
nvlist_free(retprops);
return (NULL);
}
/*
* Set zpool property : propname=propval.
*/
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
char errbuf[ERRBUFLEN];
nvlist_t *nvl = NULL;
nvlist_t *realprops;
uint64_t version;
prop_flags_t flags = { 0 };
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zpool_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_add_string(nvl, propname, propval) != 0) {
nvlist_free(nvl);
return (no_memory(zhp->zpool_hdl));
}
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
nvlist_free(nvl);
return (-1);
}
nvlist_free(nvl);
nvl = realprops;
/*
* Execute the corresponding ioctl() to set this property.
*/
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
zcmd_free_nvlists(&zc);
nvlist_free(nvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
else
(void) zpool_props_refresh(zhp);
return (ret);
}
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
zfs_type_t type, boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
nvlist_t *features = NULL;
nvpair_t *nvp;
zprop_list_t **last;
boolean_t firstexpand = (NULL == *plp);
int i;
if (zprop_expand_list(hdl, plp, type) != 0)
return (-1);
if (type == ZFS_TYPE_VDEV)
return (0);
last = plp;
while (*last != NULL)
last = &(*last)->pl_next;
if ((*plp)->pl_all)
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
/* Handle userprops in the all properties case */
if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
return (-1);
nvp = NULL;
while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
NULL) {
const char *propname = nvpair_name(nvp);
if (!zfs_prop_user(propname))
continue;
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = zfs_strdup(hdl, propname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (i = 0; i < SPA_FEATURES; i++) {
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
/* add any unsupported features */
for (nvp = nvlist_next_nvpair(features, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
char *propname;
boolean_t found;
if (zfeature_is_supported(nvpair_name(nvp)))
continue;
propname = zfs_asprintf(hdl, "unsupported@%s",
nvpair_name(nvp));
/*
* Before adding the property to the list make sure that no
* other pool already added the same property.
*/
found = B_FALSE;
entry = *plp;
while (entry != NULL) {
if (entry->pl_user_prop != NULL &&
strcmp(propname, entry->pl_user_prop) == 0) {
found = B_TRUE;
break;
}
entry = entry->pl_next;
}
if (found) {
free(propname);
continue;
}
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_USERPROP &&
zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
NULL, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
} else if (entry->pl_prop == ZPROP_INVAL &&
zfs_prop_user(entry->pl_user_prop) &&
zpool_get_userprop(zhp, entry->pl_user_prop, buf,
sizeof (buf), NULL) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
}
return (0);
}
int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
zprop_list_t **plp)
{
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
const char *strval = NULL;
int err = 0;
nvpair_t *elem = NULL;
nvlist_t *vprops = NULL;
nvlist_t *propval = NULL;
const char *propname;
vdev_prop_t prop;
zprop_list_t **last;
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed)
continue;
if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
entry->pl_user_prop, buf, sizeof (buf), NULL,
B_FALSE) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (entry->pl_prop == VDEV_PROP_NAME &&
strlen(vdevname) > entry->pl_width)
entry->pl_width = strlen(vdevname);
}
/* Handle the all properties case */
last = plp;
if (*last != NULL && (*last)->pl_all == B_TRUE) {
while (*last != NULL)
last = &(*last)->pl_next;
err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
if (err != 0)
return (err);
while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
propname = nvpair_name(elem);
/* Skip properties that are not user defined */
if ((prop = vdev_name_to_prop(propname)) !=
VDEV_PROP_USERPROP)
continue;
if (nvpair_value_nvlist(elem, &propval) != 0)
continue;
strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
entry = zfs_alloc(zhp->zpool_hdl,
sizeof (zprop_list_t));
entry->pl_prop = prop;
entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
propname);
entry->pl_width = strlen(strval);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
return (0);
}
/*
* Get the state for the given feature on the given ZFS pool.
*/
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len)
{
uint64_t refcount;
boolean_t found = B_FALSE;
nvlist_t *features = zpool_get_features(zhp);
boolean_t supported;
const char *feature = strchr(propname, '@') + 1;
supported = zpool_prop_feature(propname);
ASSERT(supported || zpool_prop_unsupported(propname));
/*
* Convert from feature name to feature guid. This conversion is
* unnecessary for unsupported@... properties because they already
* use guids.
*/
if (supported) {
int ret;
spa_feature_t fid;
ret = zfeature_lookup_name(feature, &fid);
if (ret != 0) {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
feature = spa_feature_table[fid].fi_guid;
}
if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
found = B_TRUE;
if (supported) {
if (!found) {
(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
} else {
if (refcount == 0)
(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
else
(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
}
} else {
if (found) {
if (refcount == 0) {
(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
} else {
(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
}
} else {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
}
return (0);
}
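/*
 * For example, for "feature@async_destroy" the buffer receives
 * "disabled" when the feature guid is absent from the pool's feature
 * nvlist, "enabled" when it is present with a zero refcount, and
 * "active" when the refcount is nonzero.
 */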
/*
* Validate the given pool name, optionally reporting an extended error
* message via 'hdl'.
*/
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
namecheck_err_t why;
char what;
int ret;
ret = pool_namecheck(pool, &why, &what);
/*
* The rules for reserved pool names were extended at a later point.
* But we need to support users with existing pools that may now be
* invalid. So we only check for this expanded set of names during a
* create (or import), and only in userland.
*/
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
strncmp(pool, "draid", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
if (hdl != NULL)
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is reserved"));
return (B_FALSE);
}
if (ret != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is too long"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in pool name"), what);
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name must begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool name is reserved"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NO_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"permission set is missing '@'"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
*/
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
boolean_t missing;
/*
* Make sure the pool name is valid.
*/
if (!zpool_name_valid(hdl, B_TRUE, pool)) {
(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
pool);
return (NULL);
}
zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (NULL);
}
if (missing) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
*/
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
zpool_handle_t *zhp;
boolean_t missing;
zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (-1);
}
if (missing) {
zpool_close(zhp);
*ret = NULL;
return (0);
}
*ret = zhp;
return (0);
}
/*
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
* state.
*/
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
return (NULL);
if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Close the handle. Simply frees the memory associated with the handle.
*/
void
zpool_close(zpool_handle_t *zhp)
{
nvlist_free(zhp->zpool_config);
nvlist_free(zhp->zpool_old_config);
nvlist_free(zhp->zpool_props);
free(zhp);
}
/*
* Return the name of the pool.
*/
const char *
zpool_get_name(zpool_handle_t *zhp)
{
return (zhp->zpool_name);
}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE).
*/
int
zpool_get_state(zpool_handle_t *zhp)
{
return (zhp->zpool_state);
}
/*
* Check if vdev list contains a special vdev
*/
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
for (uint_t c = 0; c < children; c++) {
const char *bias;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Check if vdev list contains a dRAID vdev
*/
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (uint_t c = 0; c < children; c++) {
const char *type;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type) == 0 &&
strcmp(type, VDEV_TYPE_DRAID) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Output a dRAID top-level vdev name into the provided buffer.
*/
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
uint64_t spares, uint64_t children)
{
snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
(u_longlong_t)children, (u_longlong_t)spares);
return (name);
}
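/*
 * For example, zpool_draid_name(buf, len, 8, 2, 2, 23) renders
 * "draid2:8d:23c:2s" (parity 2, 8 data disks, 23 children, 2 spares).
 */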
/*
* Return B_TRUE if the provided name is a dRAID spare name.
*/
boolean_t
zpool_is_draid_spare(const char *name)
{
uint64_t spare_id, parity, vdev_id;
if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
(u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
(u_longlong_t *)&spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
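/*
 * dRAID spare names have the form draid<parity>-<vdev>-<spare>, so
 * e.g. "draid1-2-0" (first spare of the third dRAID vdev, single
 * parity) returns B_TRUE, while "draid1" or "sda" return B_FALSE.
 */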
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
*/
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
nvlist_t *props, nvlist_t *fsprops)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zc_fsprops = NULL;
nvlist_t *zc_props = NULL;
nvlist_t *hidden_args = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[ERRBUFLEN];
int ret = -1;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
if (props) {
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
SPA_VERSION_1, flags, errbuf)) == NULL) {
goto create_failed;
}
}
if (fsprops) {
uint64_t zoned;
const char *zonestr;
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
goto create_failed;
}
if (nvlist_exists(zc_fsprops,
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
!zpool_has_special_vdev(nvroot)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s property requires a special vdev"),
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto create_failed;
}
if (!zc_props &&
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
goto create_failed;
}
if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
&wkeydata, &wkeylen) != 0) {
zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto create_failed;
}
if (nvlist_add_nvlist(zc_props,
ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
goto create_failed;
}
if (wkeydata != NULL) {
if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
goto create_failed;
if (nvlist_add_uint8_array(hidden_args, "wkeydata",
wkeydata, wkeylen) != 0)
goto create_failed;
if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
hidden_args) != 0)
goto create_failed;
}
}
if (zc_props)
zcmd_write_src_nvlist(hdl, &zc, zc_props);
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label. This can also happen if the device is
* part of an active md or lvm device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ERANGE:
/*
* This happens if the record size is smaller or larger
* than the allowed size range, or not a power of 2.
*
* NOTE: although zfs_valid_proplist is called earlier,
* this case may have slipped through since the
* pool does not exist yet and it is therefore
* impossible to read properties e.g. max blocksize
* from the pool.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"record size invalid"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is less than the "
"minimum size (%s)"), buf);
}
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ENOSPC:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
} else {
return (zpool_standard_error(hdl, errno,
errbuf));
}
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
create_failed:
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
}
/*
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
*/
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
} else {
(void) zpool_standard_error(hdl, errno, errbuf);
}
if (zfp)
zfs_close(zfp);
return (-1);
}
if (zfp) {
remove_mountpoint(zfp);
zfs_close(zfp);
}
return (0);
}
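/*
* Illustrative usage sketch (editor's example, not part of the original
* source): open and destroy an empty pool, logging the command string to
* pool history. Error handling is elided; "tank" is a placeholder name.
*
*	libzfs_handle_t *hdl = libzfs_init();
*	zpool_handle_t *zhp = zpool_open(hdl, "tank");
*
*	if (zhp != NULL) {
*		(void) zpool_destroy(zhp, "zpool destroy tank");
*		zpool_close(zhp);
*	}
*	libzfs_fini(hdl);
*/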
/*
* Create a checkpoint in the given pool.
*/
int
zpool_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
int error;
error = lzc_pool_checkpoint(zhp->zpool_name);
if (error != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot checkpoint '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, errbuf);
return (-1);
}
return (0);
}
/*
* Discard the checkpoint from the given pool.
*/
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
int error;
error = lzc_pool_checkpoint_discard(zhp->zpool_name);
if (error != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot discard checkpoint in '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, errbuf);
return (-1);
}
return (0);
}
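/*
* Illustrative usage sketch (editor's example, not part of the original
* source): take a checkpoint before a risky configuration change, then
* discard it once the change proves stable.
*
*	if (zpool_checkpoint(zhp) == 0) {
*		... perform and verify the change ...
*		(void) zpool_discard_checkpoint(zhp);
*	}
*/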
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
*/
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
zfs_cmd_t zc = {"\0"};
int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_L2CACHE &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; a pool with removing/"
"removed vdevs does not support adding "
"raidz or dRAID vdevs"));
}
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is less than the minimum "
"size (%s)"), buf);
}
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
ret = -1;
} else {
ret = 0;
}
zcmd_free_nvlists(&zc);
return (ret);
}
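/*
* Illustrative usage sketch (editor's example, not part of the original
* source): add a new top-level vdev. The 'nvroot' tree is assumed to have
* been built and verified by the caller; the zpool(8) command uses
* make_root_vdev() for this.
*
*	if (zpool_add(zhp, nvroot) != 0)
*		... the libzfs error buffer describes the failure ...
*/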
/*
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
*/
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
const char *log_str)
{
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
zc.zc_guid = hardforce;
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"use '-f' to override the following errors:\n"
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
default:
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
}
}
return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
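/*
* Illustrative usage sketch (editor's example, not part of the original
* source): attempt a plain export and fall back to a forced one, logging
* the corresponding command strings to pool history.
*
*	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
*		(void) zpool_export(zhp, B_TRUE, "zpool export -f tank");
*/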
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
nvlist_t *config)
{
nvlist_t *nv = NULL;
uint64_t rewindto;
int64_t loss = -1;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr || config == NULL)
return;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
return;
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
return;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
if (dryrun) {
(void) printf(dgettext(TEXT_DOMAIN,
"Would be able to return %s "
"to its state as of %s.\n"),
name, timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"Pool %s returned to its state as of %s.\n"),
name, timestr);
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
((longlong_t)loss + 30) / 60);
(void) printf(dgettext(TEXT_DOMAIN,
"minutes of transactions.\n"));
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
(longlong_t)loss);
(void) printf(dgettext(TEXT_DOMAIN,
"seconds of transactions.\n"));
}
}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *config)
{
nvlist_t *nv = NULL;
int64_t loss = -1;
uint64_t edata = UINT64_MAX;
uint64_t rewindto;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr)
return;
if (reason >= 0)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
else
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
goto no_info;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
&edata);
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "),
((longlong_t)loss + 30) / 60);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "),
(longlong_t)loss);
}
if (edata != 0 && edata != UINT64_MAX) {
if (edata == 1) {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
}
}
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) printf(dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
return;
no_info:
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
* zpool_import() is a contracted interface. It should be kept the same
* if possible.
*
* Applications should use zpool_import_props() to import a pool with
* new property values to be set.
*/
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
char *altroot)
{
nvlist_t *props = NULL;
int ret;
if (altroot != NULL) {
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
if (nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
nvlist_free(props);
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
}
ret = zpool_import_props(hdl, config, newname, props,
ZFS_IMPORT_NORMAL);
nvlist_free(props);
return (ret);
}
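/*
* Illustrative usage sketch (editor's example, not part of the original
* source): import a discovered pool under a new name with an alternate
* root. The 'config' nvlist is assumed to come from a label search such
* as zpool_find_import().
*
*	char altroot[] = "/mnt";
*
*	if (zpool_import(hdl, config, "newtank", altroot) != 0)
*		... the libzfs error buffer describes the failure ...
*/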
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
int indent)
{
nvlist_t **child;
uint_t c, children;
char *vname;
uint64_t is_log = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
&is_log);
if (name != NULL)
(void) printf("\t%*s%s%s\n", indent, "", name,
is_log ? " [log]" : "");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
print_vdev_tree(hdl, vname, child[c], indent + 2);
free(vname);
}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
nvlist_t *nvinfo, *unsup_feat;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
const char *desc = fnvpair_value_string(nvp);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
(void) printf("\t%s\n", nvpair_name(nvp));
}
}
/*
* Import the given pool using the known configuration and a list of
* properties to be set. The configuration should have come from
* zpool_find_import(). The 'newname' parameter controls whether the pool
* is imported with a different name.
*/
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
zpool_load_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
const char *thename;
const char *origname;
int ret;
int error = 0;
char errbuf[ERRBUFLEN];
origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
thename = newname;
} else {
thename = origname;
}
if (props != NULL) {
uint64_t version;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL)
return (-1);
zcmd_write_src_nvlist(hdl, &zc, props);
nvlist_free(props);
}
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
zcmd_write_conf_nvlist(hdl, &zc, config);
zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (ret != 0)
error = errno;
(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
zcmd_free_nvlists(&zc);
zpool_get_load_policy(config, &policy);
if (error) {
char desc[1024];
char aux[256];
/*
* Dry-run failed, but we print out what success
* would look like if we found a best txg.
*/
if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
return (-1);
}
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
thename);
else
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
origname, thename);
switch (error) {
case ENOTSUP:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
"All unsupported features are only "
"required for writing to the pool."
"\nThe pool can be imported using "
"'-o readonly=on'.\n"));
}
}
/*
* Unsupported version.
*/
(void) zfs_error(hdl, EZFS_BADVERSION, desc);
break;
case EREMOTEIO:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
mmp_state_t mmp_state;
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
if (mmp_state == MMP_STATE_ACTIVE) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool is imp"
"orted on host '%s' (hostid=%lx).\n"
"Export the pool on the other "
"system, then run 'zpool import'."),
hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool has "
"the multihost property on and "
"the\nsystem's hostid is not set. "
"Set a unique system hostid with "
"the zgenhostid(8) command.\n"));
}
(void) zfs_error_aux(hdl, "%s", aux);
}
(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
break;
case EROFS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENXIO:
if (nv && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"The devices below are missing or "
"corrupted, use '-m' to import the pool "
"anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
}
(void) zpool_standard_error(hdl, error, desc);
break;
case EEXIST:
(void) zpool_standard_error(hdl, error, desc);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices are already in use\n"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENAMETOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new name of at least one dataset is longer than "
"the maximum allowable length"));
(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
break;
default:
(void) zpool_standard_error(hdl, error, desc);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
break;
}
nvlist_free(nv);
ret = -1;
} else {
zpool_handle_t *zhp;
/*
* This should never fail, but play it safe anyway.
*/
if (zpool_open_silent(hdl, thename, &zhp) != 0)
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
}
return (ret);
}
/*
* Translate vdev names to guids. If a vdev_path is determined to be
* unsuitable then a vd_errlist is allocated and the vdev path and errno
* are added to it.
*/
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
nvlist_t *errlist = NULL;
int error = 0;
for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
elem = nvlist_next_nvpair(vds, elem)) {
boolean_t spare, cache;
const char *vd_path = nvpair_name(elem);
nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
NULL);
if ((tgt == NULL) || cache || spare) {
if (errlist == NULL) {
errlist = fnvlist_alloc();
error = EINVAL;
}
uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
(spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
fnvlist_add_int64(errlist, vd_path, err);
continue;
}
uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
fnvlist_add_uint64(vdev_guids, vd_path, guid);
char msg[MAXNAMELEN];
(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
fnvlist_add_string(guids_to_paths, msg, vd_path);
}
if (error != 0) {
verify(errlist != NULL);
if (vd_errlist != NULL)
*vd_errlist = errlist;
else
fnvlist_free(errlist);
}
return (error);
}
static int
xlate_init_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_INITIALIZING);
case ESRCH:
return (EZFS_NO_INITIALIZE);
}
return (err);
}
/*
* Begin, suspend, cancel, or uninit (clear) the initialization (the writing
* of a pattern to all free blocks) for the given vdevs in the given pool.
*/
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds, boolean_t wait)
{
int err;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *vd_errlist = NULL;
nvlist_t *errlist;
nvpair_t *elem;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &vd_errlist);
if (err != 0) {
verify(vd_errlist != NULL);
goto list_errors;
}
err = lzc_initialize(zhp->zpool_name, cmd_type,
vdev_guids, &errlist);
if (err != 0) {
if (errlist != NULL && nvlist_lookup_nvlist(errlist,
ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
goto list_errors;
}
if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"uninitialize is not supported by kernel"));
}
(void) zpool_standard_error(zhp->zpool_hdl, err,
dgettext(TEXT_DOMAIN, "operation failed"));
goto out;
}
if (wait) {
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_INITIALIZE, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting for '%s' to initialize"),
nvpair_name(elem));
goto out;
}
}
}
goto out;
list_errors:
for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
elem = nvlist_next_nvpair(vd_errlist, elem)) {
int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
const char *path;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot initialize '%s'", path);
}
out:
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
if (vd_errlist != NULL)
fnvlist_free(vd_errlist);
return (err == 0 ? 0 : -1);
}
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}
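/*
* Illustrative usage sketch (editor's example, not part of the original
* source): start initializing two leaf vdevs and block until both finish.
* The 'vds' list is keyed by vdev path, matching what
* zpool_translate_vdev_guids() expects; device names are placeholders.
*
*	nvlist_t *vds = fnvlist_alloc();
*
*	fnvlist_add_boolean(vds, "/dev/sda");
*	fnvlist_add_boolean(vds, "/dev/sdb");
*	(void) zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
*	fnvlist_free(vds);
*/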
static int
xlate_trim_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_TRIMMING);
case ESRCH:
return (EZFS_NO_TRIM);
case EOPNOTSUPP:
return (EZFS_TRIM_NOTSUP);
}
return (err);
}
static int
zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
{
int err;
nvpair_t *elem;
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_TRIM, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting to trim '%s'"), nvpair_name(elem));
return (err);
}
}
return (0);
}
/*
* Check errlist and report any errors, omitting ones which should be
* suppressed. Returns B_TRUE if any errors were reported.
*/
static boolean_t
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
{
nvpair_t *elem;
boolean_t reported_errs = B_FALSE;
int num_vds = 0;
int num_suppressed_errs = 0;
for (elem = nvlist_next_nvpair(vds, NULL);
elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
num_vds++;
}
for (elem = nvlist_next_nvpair(errlist, NULL);
elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
const char *path;
/*
* If only the pool was specified, and it was not a secure
* trim, then suppress warnings for individual vdevs which
* do not support trimming.
*/
if (vd_error == EZFS_TRIM_NOTSUP &&
trim_flags->fullpool &&
!trim_flags->secure) {
num_suppressed_errs++;
continue;
}
reported_errs = B_TRUE;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot trim '%s'", path);
}
if (num_suppressed_errs == num_vds) {
(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"no devices in pool support trim operations"));
(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
dgettext(TEXT_DOMAIN, "cannot trim")));
reported_errs = B_TRUE;
}
return (reported_errs);
}
/*
* Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
* the given vdevs in the given pool.
*/
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
trimflags_t *trim_flags)
{
int err;
int retval = 0;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *errlist = NULL;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &errlist);
if (err != 0) {
check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
retval = -1;
goto out;
}
err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
trim_flags->secure, vdev_guids, &errlist);
if (err != 0) {
nvlist_t *vd_errlist;
if (errlist != NULL && nvlist_lookup_nvlist(errlist,
ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
if (check_trim_errs(zhp, trim_flags, guids_to_paths,
vds, vd_errlist)) {
retval = -1;
goto out;
}
} else {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "operation failed"));
zpool_standard_error(zhp->zpool_hdl, err, errbuf);
retval = -1;
goto out;
}
}
if (trim_flags->wait)
retval = zpool_trim_wait(zhp, vdev_guids);
out:
if (errlist != NULL)
fnvlist_free(errlist);
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
return (retval);
}
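/*
* Illustrative usage sketch (editor's example, not part of the original
* source): start a non-secure TRIM of one device at the default rate and
* wait for it to complete. The device name is a placeholder.
*
*	trimflags_t flags = {0};
*	nvlist_t *vds = fnvlist_alloc();
*
*	flags.wait = B_TRUE;
*	fnvlist_add_boolean(vds, "/dev/sda");
*	(void) zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
*	fnvlist_free(vds);
*/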
/*
* Scan the pool.
*/
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
char errbuf[ERRBUFLEN];
int err;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *args = fnvlist_alloc();
fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
fnvlist_free(args);
if (err == 0) {
return (0);
} else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zpool_name,
sizeof (zc.zc_name));
zc.zc_cookie = func;
zc.zc_flags = cmd;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
return (0);
}
/*
* An ECANCELED on a scrub means one of the following:
* 1. we resumed a paused scrub.
* 2. we resumed a paused error scrub.
* 3. an error scrub was not run because there is no error log.
*/
if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
return (0);
/*
* The following case is handled here:
* 1. pausing a scrub/error scrub when none is in progress is treated
*    as success.
*/
if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
POOL_SCRUB_PAUSE) {
return (0);
}
ASSERT3U(func, >=, POOL_SCAN_NONE);
ASSERT3U(func, <, POOL_SCAN_FUNCS);
if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
if (cmd == POOL_SCRUB_PAUSE) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
zhp->zpool_name);
} else {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot scrub %s"),
zhp->zpool_name);
}
} else if (func == POOL_SCAN_RESILVER) {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot restart resilver on %s"), zhp->zpool_name);
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot cancel scrubbing %s"), zhp->zpool_name);
} else {
assert(!"unexpected result");
}
/*
* With EBUSY, six cases are possible:
*
* Current state              Requested
* 1. Normal Scrub Running    Normal Scrub or Error Scrub
* 2. Normal Scrub Paused     Error Scrub
* 3. Normal Scrub Paused     Pause Normal Scrub
* 4. Error Scrub Running     Normal Scrub or Error Scrub
* 5. Error Scrub Paused      Pause Error Scrub
* 6. Resilvering             Anything else
*/
if (err == EBUSY) {
nvlist_t *nvroot;
pool_scan_stat_t *ps = NULL;
uint_t psc;
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
ps->pss_state == DSS_SCANNING) {
if (ps->pss_pass_scrub_pause == 0) {
/* handles case 1 */
assert(cmd == POOL_SCRUB_NORMAL);
return (zfs_error(hdl, EZFS_SCRUBBING,
errbuf));
} else {
if (func == POOL_SCAN_ERRORSCRUB) {
/* handles case 2 */
ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
return (zfs_error(hdl,
EZFS_SCRUB_PAUSED_TO_CANCEL,
errbuf));
} else {
/* handles case 3 */
ASSERT3U(func, ==, POOL_SCAN_SCRUB);
ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
return (zfs_error(hdl,
EZFS_SCRUB_PAUSED, errbuf));
}
}
} else if (ps &&
ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
if (ps->pss_pass_error_scrub_pause == 0) {
/* handles case 4 */
ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
errbuf));
} else {
/* handles case 5 */
ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
errbuf));
}
} else {
/* handles case 6 */
return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
}
} else if (err == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
} else {
return (zpool_standard_error(hdl, err, errbuf));
}
}
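/*
* Illustrative usage sketch (editor's example, not part of the original
* source): start a scrub, pause it, and resume it later. Resuming reuses
* POOL_SCRUB_NORMAL; the ECANCELED case above makes the resume succeed.
*
*	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
*	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
*	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
*/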
/*
* Find a vdev that matches the search criteria specified. We use the
* nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare, and FALSE if it's an INUSE spare.
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
uint_t c, children;
nvlist_t **child;
nvlist_t *ret;
uint64_t is_log;
const char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
if (search == NULL || pair == NULL)
return (NULL);
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
uint64_t srchval = fnvpair_value_uint64(pair);
uint64_t theguid = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_GUID);
if (theguid == srchval)
return (nv);
}
break;
case DATA_TYPE_STRING: {
const char *srchval, *val;
srchval = fnvpair_value_string(pair);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
break;
/*
* Search for the requested value. Special cases:
*
* - ZPOOL_CONFIG_PATH for whole disk entries. These end in
* "-part1", or "p1". The suffix is hidden from the user,
* but included in the string, so this matches around it.
* - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
* is used to check all possible expanded paths.
* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
*
* Otherwise, all other searches are simple string compares.
*/
if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
return (nv);
} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
/*
* Determine our vdev type, keeping in mind
* that the srchval is composed of a type and
* vdev id pair (i.e. mirror-4).
*/
if ((type = strdup(srchval)) == NULL)
return (NULL);
if ((p = strrchr(type, '-')) == NULL) {
free(type);
break;
}
idx = p + 1;
*p = '\0';
/*
* If the types don't match then keep looking.
*/
if (strncmp(val, type, strlen(val)) != 0) {
free(type);
break;
}
verify(zpool_vdev_is_interior(type));
id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
errno = 0;
vdev_id = strtoull(idx, &end, 10);
/*
* If we are looking for a raidz and a parity is
* specified, make sure it matches.
*/
int rzlen = strlen(VDEV_TYPE_RAIDZ);
assert(rzlen == strlen(VDEV_TYPE_DRAID));
int typlen = strlen(type);
if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
typlen != rzlen) {
uint64_t vdev_parity;
int parity = *(type + rzlen) - '0';
if (parity <= 0 || parity > 3 ||
(typlen - rzlen) != 1) {
/*
* Nonsense parity specified, can
* never match
*/
free(type);
return (NULL);
}
vdev_parity = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY);
if ((int)vdev_parity != parity) {
free(type);
break;
}
}
free(type);
if (errno != 0)
return (NULL);
/*
* Now verify that we have the correct vdev id.
*/
if (vdev_id == id)
return (nv);
}
/*
* Common case
*/
if (strcmp(srchval, val) == 0)
return (nv);
break;
}
default:
break;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
/*
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs. So we always look up the
* log device from the root of the vdev tree (where
* 'log' is non-NULL).
*/
if (log != NULL &&
nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
is_log) {
*log = B_TRUE;
}
return (ret);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
return (ret);
}
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*l2cache = B_TRUE;
return (ret);
}
}
}
return (NULL);
}
/*
* Given a physical path or guid, find the associated vdev.
*/
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
nvlist_t *search, *nvroot, *ret;
uint64_t guid;
char *end;
search = fnvlist_alloc();
guid = strtoull(ppath, &end, 0);
if (guid != 0 && *end == '\0') {
fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else {
fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
}
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
fnvlist_free(search);
return (ret);
}
/*
* Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
*/
static boolean_t
zpool_vdev_is_interior(const char *name)
{
if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE);
if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
!zpool_is_draid_spare(name))
return (B_TRUE);
return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
char *end;
nvlist_t *nvroot, *search, *ret;
uint64_t guid;
search = fnvlist_alloc();
guid = strtoull(path, &end, 0);
if (guid != 0 && *end == '\0') {
fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else if (zpool_vdev_is_interior(path)) {
fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
} else {
fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
}
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
fnvlist_free(search);
return (ret);
}
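/*
* Illustrative usage sketch (editor's example, not part of the original
* source): look up a vdev by path and read its guid, rejecting hot
* spares. The device name is a placeholder.
*
*	boolean_t spare, l2cache, log;
*	uint64_t guid;
*	nvlist_t *tgt;
*
*	tgt = zpool_find_vdev(zhp, "/dev/sda", &spare, &l2cache, &log);
*	if (tgt != NULL && !spare)
*		guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
*/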
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
* If is_spare, is_l2cache, or is_log is non-NULL, then store in it
* whether the vdev is a spare, l2cache, or log device. NULL pointers
* are ignored.
*/
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
nvlist_t *tgt;
if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
&log)) == NULL)
return (0);
if (is_spare != NULL)
*is_spare = spare;
if (is_l2cache != NULL)
*is_l2cache = l2cache;
if (is_log != NULL)
*is_log = log;
return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
}
/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
/*
* Bring the specified vdev online. The 'flags' parameter is a set of the
* ZFS_ONLINE_* flags.
*/
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
vdev_state_t *newstate)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (flags & ZFS_ONLINE_EXPAND) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
#ifndef __FreeBSD__
const char *pathname;
if ((flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
/*
* XXX - L2ARC 1.0 devices can't support expansion.
*/
if (l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
}
if (wholedisk) {
const char *fullpath = path;
char buf[MAXPATHLEN];
int error;
if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
errbuf));
fullpath = buf;
}
error = zpool_relabel_disk(hdl, fullpath, errbuf);
if (error != 0)
return (error);
}
}
#endif
zc.zc_cookie = VDEV_STATE_ONLINE;
zc.zc_obj = flags;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
if (errno == EINVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
}
return (zpool_standard_error(hdl, errno, errbuf));
}
*newstate = zc.zc_cookie;
return (0);
}
/*
* Take the specified vdev offline
*/
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
case EEXIST:
/*
* The log device has unplayed logs
*/
return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
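/*
* Illustrative usage sketch (editor's example, not part of the original
* source): temporarily offline a disk for maintenance, then bring it back
* online, requesting expansion in case it grew. The device name is a
* placeholder.
*
*	vdev_state_t newstate;
*
*	if (zpool_vdev_offline(zhp, "/dev/sda", B_TRUE) == 0) {
*		... service the disk ...
*		(void) zpool_vdev_online(zhp, "/dev/sda",
*		    ZFS_ONLINE_EXPAND, &newstate);
*	}
*/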
/*
* Remove the specified vdev asynchronously from the configuration, so
* that it may come ONLINE if reinserted. This is called from zed on a
* udev remove event.
* Note: We also have a similar function zpool_vdev_remove() that
* removes the vdev from the pool.
*/
int
zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
zc.zc_cookie = VDEV_STATE_REMOVED;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Mark the given vdev faulted.
*/
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_FAULTED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
/*
* Mark the given vdev degraded.
*/
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_DEGRADED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Returns TRUE if the given nvlist is a vdev that was originally swapped in as
* a hot spare.
*/
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
const char *type = fnvlist_lookup_string(search,
ZPOOL_CONFIG_TYPE);
if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
children == 2 && child[which] == tgt)
return (B_TRUE);
for (c = 0; c < children; c++)
if (is_replacing_spare(child[c], tgt, which))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Attach new_disk (fully described by nvroot) to old_disk.
* If 'replacing' is specified, the new disk will replace the old one.
*/
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
int ret;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
uint64_t val;
char *newname;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (replacing)
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot replace %s with %s"), old_disk, new_disk);
else
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
zc.zc_cookie = replacing;
zc.zc_simple = rebuild;
if (rebuild &&
zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"the loaded zfs module doesn't support device rebuilds"));
return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children != 1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
}
config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1);
/*
* If the target is a hot spare that has been swapped in, we can only
* replace it with another hot spare.
*/
if (replacing &&
nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
NULL) == NULL || !avail_spare) &&
is_replacing_spare(config_root, tgt, 1)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
}
free(newname);
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
if (ret == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't attach to or replace this type of vdev.
*/
if (replacing) {
uint64_t version = zpool_get_prop_int(zhp,
ZPOOL_PROP_VERSION, NULL);
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a log with a spare"));
} else if (rebuild) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"only mirror and dRAID vdevs support "
"sequential reconstruction"));
} else if (zpool_is_draid_spare(new_disk)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares can only replace child "
"devices in their parent's dRAID vdev"));
} else if (version >= SPA_VERSION_MULTI_REPLACE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"already in replacing/spare config; wait "
"for completion or use 'zpool detach'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a replacing device"));
}
} else {
char status[64] = {0};
zpool_prop_get_feature(zhp,
"feature@device_rebuild", status, 63);
if (rebuild &&
strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device_rebuild feature must be enabled "
"in order to use sequential "
"reconstruction"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only attach to mirrors and top-level "
"disks"));
}
}
(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EINVAL:
/*
* The new device must be a single disk.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
"or device removal is in progress"),
new_disk);
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
* The new device is too small.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is too small"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EDOM:
/*
* The new device has a different optimal sector size.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device has a different optimal sector size; use the "
"option '-o ashift=N' to override the optimal size"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
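/*
* Illustrative usage sketch (editor's example, not part of the original
* source): replace old_disk with new_disk using healing (non-sequential)
* reconstruction. The 'nvroot' tree describing the single new disk is
* assumed to have been built by the caller, e.g. via the zpool(8)
* command's make_root_vdev().
*
*	if (zpool_vdev_attach(zhp, "/dev/old", "/dev/new", nvroot,
*	    B_TRUE, B_FALSE) != 0)
*		... the libzfs error buffer describes the failure ...
*/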
/*
* Detach the specified device.
*/
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't detach from this type of vdev.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
"applicable to mirror and replacing vdevs"));
(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
/*
* Find a mirror vdev in the source nvlist.
*
* The mchild array contains a list of disks in one of the top-level mirrors
* of the source pool. The schild array contains a list of disks that the
* user specified on the command line. We loop over the mchild array to
* see if any entry in the schild array matches.
*
* If a disk in the mchild array is found in the schild array, we return
* the index of that entry. Otherwise we return -1.
*/
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
nvlist_t **schild, uint_t schildren)
{
uint_t mc;
for (mc = 0; mc < mchildren; mc++) {
uint_t sc;
char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
mchild[mc], 0);
for (sc = 0; sc < schildren; sc++) {
char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
schild[sc], 0);
boolean_t result = (strcmp(mpath, spath) == 0);
free(spath);
if (result) {
free(mpath);
return (mc);
}
}
free(mpath);
}
return (-1);
}
/*
* Split a mirror pool. If '*newroot' is NULL, then a new nvlist
* is generated, and it is the caller's responsibility to free it.
*/
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
nvlist_t *props, splitflags_t flags)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
const char *bias;
nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
nvlist_t **varray = NULL, *zc_props = NULL;
uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t vers, readonly = B_FALSE;
boolean_t freelist = B_FALSE, memory_err = B_TRUE;
int retval = 0;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("Internal error: unable to "
"retrieve pool configuration\n"));
return (-1);
}
tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (props) {
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
props, vers, flags, errbuf)) == NULL)
return (-1);
(void) nvlist_lookup_uint64(zc_props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property %s can only be set at import time"),
zpool_prop_to_name(ZPOOL_PROP_READONLY));
return (-1);
}
}
if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool is missing vdev tree"));
nvlist_free(zc_props);
return (-1);
}
varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
vcount = 0;
if (*newroot == NULL ||
nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
&newchild, &newchildren) != 0)
newchildren = 0;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
const char *type;
nvlist_t **mchild, *vdev;
uint_t mchildren;
int entry;
/*
* Unlike cache & spares, slogs are stored in the
* ZPOOL_CONFIG_CHILDREN array. We filter them out here.
*/
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_log || is_hole) {
/*
* Create a hole vdev and put it in the config.
*/
if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0)
goto out;
if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
1) != 0)
goto out;
if (lastlog == 0)
lastlog = vcount;
varray[vcount++] = vdev;
continue;
}
lastlog = 0;
type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
vdev = child[c];
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
continue;
} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool must be composed only of mirrors\n"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
is_special = B_TRUE;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
is_dedup = B_TRUE;
}
verify(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
/* find or add an entry for this top-level vdev */
if (newchildren > 0 &&
(entry = find_vdev_entry(zhp, mchild, mchildren,
newchild, newchildren)) >= 0) {
/* We found a disk that the user specified. */
vdev = mchild[entry];
++found;
} else {
/* User didn't specify a disk for this vdev. */
vdev = mchild[mchildren - 1];
}
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
if (flags.dryrun != 0) {
if (is_dedup == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) != 0)
goto out;
} else if (is_special == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) != 0)
goto out;
}
}
}
/* did we find every disk the user specified? */
if (found != newchildren) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
"include at most one disk from each mirror"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
/* Prepare the nvlist for populating. */
if (*newroot == NULL) {
if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
goto out;
freelist = B_TRUE;
if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0)
goto out;
} else {
verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
}
/* Add all the children we found */
if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
goto out;
/*
* If we're just doing a dry run, exit now with success.
*/
if (flags.dryrun) {
memory_err = B_FALSE;
freelist = B_FALSE;
goto out;
}
/* now build up the config list & call the ioctl */
if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_nvlist(newconfig,
ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
nvlist_add_string(newconfig,
ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
goto out;
/*
* The new pool is automatically part of the namespace unless we
* explicitly export it.
*/
if (!flags.import)
zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
zcmd_write_conf_nvlist(hdl, &zc, newconfig);
if (zc_props != NULL)
zcmd_write_src_nvlist(hdl, &zc, zc_props);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
retval = zpool_standard_error(hdl, errno, errbuf);
goto out;
}
freelist = B_FALSE;
memory_err = B_FALSE;
out:
if (varray != NULL) {
int v;
for (v = 0; v < vcount; v++)
nvlist_free(varray[v]);
free(varray);
}
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(newconfig);
if (freelist) {
nvlist_free(*newroot);
*newroot = NULL;
}
if (retval != 0)
return (retval);
if (memory_err)
return (no_memory(hdl));
return (0);
}
/*
* Remove the given device.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t version;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
if (zpool_is_draid_spare(path)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares cannot be removed"));
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (islog && version < SPA_VERSION_HOLES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support log removal"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
switch (errno) {
+ case EALREADY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "removal for this vdev is already in progress."));
+ (void) zfs_error(hdl, EZFS_BUSY, errbuf);
+ break;
+
case EINVAL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; all top-level vdevs must "
"have the same sector size and not be raidz."));
(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Pool busy; removal may already be in progress"));
}
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
case EACCES:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
} else {
(void) zpool_standard_error(hdl, errno, errbuf);
}
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
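/*
* Illustrative usage sketch (editor's example, not part of the original
* source): remove a top-level mirror by its type/id name as printed by
* 'zpool status'; zpool_find_vdev() resolves such names via
* zpool_vdev_is_interior().
*
*	if (zpool_vdev_remove(zhp, "mirror-1") != 0)
*		... the libzfs error buffer describes the failure ...
*/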
int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot cancel removal"));
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = 1;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
uint64_t *sizep)
{
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
path);
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare || l2cache || islog) {
*sizep = 0;
return (0);
}
if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"indirect size not available"));
return (zfs_error(hdl, EINVAL, errbuf));
}
return (0);
}
/*
* Clear the errors for the pool, or the particular device if specified.
*/
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
zpool_load_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
int error;
if (path)
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
path);
else
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
&l2cache, NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
/*
* Don't allow error clearing for hot spares. Do allow
* error clearing for l2cache devices.
*/
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
}
zpool_get_load_policy(rewindnvl, &policy);
zc.zc_cookie = policy.zlp_rewind;
zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
zcmd_write_src_nvlist(hdl, &zc, rewindnvl);
while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
zcmd_free_nvlists(&zc);
return (0);
}
zcmd_free_nvlists(&zc);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Similar to zpool_clear(), but takes a GUID (used by fmd).
*/
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
(u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = ZPOOL_NO_REWIND;
if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Change the GUID for a pool.
*/
int
zpool_reguid(zpool_handle_t *zhp)
{
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_cmd_t zc = {"\0"};
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Reopen the pool.
*/
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *scrub_restart = data;
int error;
error = lzc_reopen(pool_name, *scrub_restart);
if (error) {
return (zpool_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
}
return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
int ret;
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *force = data;
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_boolean_value(innvl, "force", *force);
if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
nvlist_free(innvl);
return (zpool_standard_error_fmt(hdl, ret,
dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
}
nvlist_free(innvl);
return (0);
}
#define PATH_BUF_LEN 64
/*
* Given a vdev, return the name to display in iostat. If the vdev has a path,
* we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
* We also check if this is a whole disk, in which case we strip off the
* trailing 's0' slice name.
*
* This routine is also responsible for identifying when disks have been
* reconfigured in a new location. The kernel will have opened the device by
* devid, but the path will still refer to the old location. To catch this, we
* first do a path -> devid translation (which is fast for the common case). If
* the devid matches, we're done. If not, we do a reverse devid -> path
* translation and issue the appropriate ioctl() to update the path of the vdev.
* If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
* of these checks.
*/
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
int name_flags)
{
const char *type, *tpath;
const char *path;
uint64_t value;
char buf[PATH_BUF_LEN];
char tmpbuf[PATH_BUF_LEN * 2];
/*
* vdev_name will be "root"/"root-0" for the root vdev, but it is the
* zpool name that will be displayed to the user.
*/
type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
return (zfs_strdup(hdl, zpool_get_name(zhp)));
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
name_flags |= VDEV_NAME_PATH;
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
name_flags |= VDEV_NAME_GUID;
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
name_flags & VDEV_NAME_GUID) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
path = tpath;
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
if (rp) {
strlcpy(buf, rp, sizeof (buf));
path = buf;
free(rp);
}
}
/*
* For a block device only use the name.
*/
if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
path = zfs_strip_path(path);
}
/*
* Remove the partition from the path if this is a whole disk.
*/
if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
} else {
path = type;
/*
* If it's a raidz device, we need to stick in the parity level.
*/
if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
(void) snprintf(buf, sizeof (buf), "%s%llu", path,
(u_longlong_t)value);
path = buf;
}
/*
* If it's a dRAID device, we add parity, groups, and spares.
*/
if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata, nparity, nspares;
nvlist_t **child;
uint_t children;
verify(nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
nparity = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY);
ndata = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NDATA);
nspares = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NSPARES);
path = zpool_draid_name(buf, sizeof (buf), ndata,
nparity, nspares, children);
}
/*
* We identify each top-level vdev by using a <type-id>
* naming convention.
*/
if (name_flags & VDEV_NAME_TYPE_ID) {
uint64_t id = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_ID);
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
path, (u_longlong_t)id);
path = tmpbuf;
}
}
return (zfs_strdup(hdl, path));
}
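/*
* Example (illustrative sketch; 'hdl', 'zhp' and 'config' are assumed to
* come from libzfs_init(), zpool_open() and zpool_get_config()): print a
* display name for each top-level vdev, freeing each strdup'd result:
*
*	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
*	    ZPOOL_CONFIG_VDEV_TREE);
*	nvlist_t **child;
*	uint_t c, children;
*	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
*	    &child, &children) == 0) {
*		for (c = 0; c < children; c++) {
*			char *name = zpool_vdev_name(hdl, zhp, child[c],
*			    VDEV_NAME_TYPE_ID);
*			(void) printf("%s\n", name);
*			free(name);
*		}
*	}
*/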
static int
zbookmark_mem_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
* Retrieve the persistent error log, uniquify the members, and return to the
* caller.
*/
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
zbookmark_phys_t *buf;
uint64_t buflen = 10000; /* approx. 320K of RAM */
if (fnvlist_lookup_uint64(zhp->zpool_config,
ZPOOL_CONFIG_ERRCOUNT) == 0)
return (0);
/*
* Retrieve the raw error list from the kernel. If it doesn't fit,
* allocate a larger buffer and retry.
*/
(void) strcpy(zc.zc_name, zhp->zpool_name);
for (;;) {
buf = zfs_alloc(zhp->zpool_hdl,
buflen * sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst = (uintptr_t)buf;
zc.zc_nvlist_dst_size = buflen;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
&zc) != 0) {
free(buf);
if (errno == ENOMEM) {
buflen *= 2;
} else {
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "errors: List of "
"errors unavailable")));
}
} else {
break;
}
}
/*
* Sort the resulting bookmarks. This is a little confusing due to the
* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
* to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
* _not_ copied as part of the process. So we point the start of our
* array appropriately and decrement the total number of elements.
*/
zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
uint64_t zblen = buflen - zc.zc_nvlist_dst_size;
qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
/*
* Fill in the nverrlistp with nvlist's of dataset and object numbers.
*/
for (uint64_t i = 0; i < zblen; i++) {
nvlist_t *nv;
/* ignoring zb_blkid and zb_level for now */
if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
zb[i-1].zb_object == zb[i].zb_object)
continue;
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
goto nomem;
if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
zb[i].zb_objset) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
zb[i].zb_object) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
nvlist_free(nv);
goto nomem;
}
nvlist_free(nv);
}
free(buf);
return (0);
nomem:
free(buf);
return (no_memory(zhp->zpool_hdl));
}
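/*
* Example (illustrative sketch; assumes an open pool handle 'zhp'): walk
* the uniquified error list and print each dataset/object pair:
*
*	nvlist_t *nverrlist = NULL;
*	nvpair_t *elem = NULL;
*	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
*		while ((elem = nvlist_next_nvpair(nverrlist,
*		    elem)) != NULL) {
*			nvlist_t *nv = fnvpair_value_nvlist(elem);
*			(void) printf("%llu:%llu\n", (u_longlong_t)
*			    fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET),
*			    (u_longlong_t)
*			    fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT));
*		}
*		nvlist_free(nverrlist);
*	}
*/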
/*
* Upgrade a ZFS pool to the latest on-disk version.
*/
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strcpy(zc.zc_name, zhp->zpool_name);
zc.zc_cookie = new_version;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
zhp->zpool_name));
return (0);
}
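/*
* Flatten a command line (the basename of argv[0] followed by its
* arguments, space-separated) into 'string', typically for later
* logging in the pool history.
*/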
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
int i;
(void) strlcpy(string, zfs_basename(argv[0]), len);
for (i = 1; i < argc; i++) {
(void) strlcat(string, " ", len);
(void) strlcat(string, argv[i], len);
}
}
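/*
* Append 'message' to the pool history log via ZFS_IOC_LOG_HISTORY.
*/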
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *args;
args = fnvlist_alloc();
fnvlist_add_string(args, "message", message);
zcmd_write_src_nvlist(hdl, &zc, args);
int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
nvlist_free(args);
zcmd_free_nvlists(&zc);
return (err);
}
/*
* Perform an ioctl to retrieve a chunk of the command history of a pool.
*
* 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
* logical offset of the history buffer to start reading from.
*
* Upon return, 'off' is the next logical offset to read from and
* 'len' is the actual number of bytes read into 'buf'.
*/
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)buf;
zc.zc_history_len = *len;
zc.zc_history_offset = *off;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
switch (errno) {
case EPERM:
return (zfs_error_fmt(hdl, EZFS_PERM,
dgettext(TEXT_DOMAIN,
"cannot show history for pool '%s'"),
zhp->zpool_name));
case ENOENT:
return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s'"), zhp->zpool_name));
case ENOTSUP:
return (zfs_error_fmt(hdl, EZFS_BADVERSION,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s', pool must be upgraded"), zhp->zpool_name));
default:
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name));
}
}
*len = zc.zc_history_len;
*off = zc.zc_history_offset;
return (0);
}
/*
* Retrieve the command history of a pool.
*/
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
boolean_t *eof)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char *buf;
int buflen = 128 * 1024;
nvlist_t **records = NULL;
uint_t numrecords = 0;
int err = 0, i;
uint64_t start = *off;
buf = zfs_alloc(hdl, buflen);
/* process about 1MiB at a time */
while (*off - start < 1024 * 1024) {
uint64_t bytes_read = buflen;
uint64_t leftover;
if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
break;
/* if nothing else was read in, we're at EOF, just return */
if (!bytes_read) {
*eof = B_TRUE;
break;
}
if ((err = zpool_history_unpack(buf, bytes_read,
&leftover, &records, &numrecords)) != 0) {
zpool_standard_error_fmt(hdl, err,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name);
break;
}
*off -= leftover;
if (leftover == bytes_read) {
/*
* no progress made, because buffer is not big enough
* to hold this record; resize and retry.
*/
buflen *= 2;
free(buf);
buf = zfs_alloc(hdl, buflen);
}
}
free(buf);
if (!err) {
*nvhisp = fnvlist_alloc();
fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
(const nvlist_t **)records, numrecords);
}
for (i = 0; i < numrecords; i++)
nvlist_free(records[i]);
free(records);
return (err);
}
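/*
* Example (illustrative sketch; assumes an open pool handle 'zhp'): drain
* the whole history by advancing 'off' until EOF is reported:
*
*	nvlist_t *nvhis;
*	uint64_t off = 0;
*	boolean_t eof = B_FALSE;
*	while (!eof) {
*		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
*			break;
*		(void) nvlist_print(stdout, nvhis);
*		nvlist_free(nvhis);
*	}
*/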
/*
* Retrieve the next event given the passed 'zevent_fd' file descriptor.
* If there is a new event available 'nvp' will contain a newly allocated
* nvlist and 'dropped' will be set to the number of missed events since
* the last call to this function. When 'nvp' is set to NULL it indicates
* no new events are available. In either case the function returns 0 and
* it is up to the caller to free 'nvp'. In the case of a fatal error the
* function will return a non-zero value. When the function is called in
* blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
* it will not return until a new event is available.
*/
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
int *dropped, unsigned flags, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
*nvp = NULL;
*dropped = 0;
zc.zc_cleanup_fd = zevent_fd;
if (flags & ZEVENT_NONBLOCK)
zc.zc_guid = ZEVENT_NONBLOCK;
zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);
retry:
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
switch (errno) {
case ESHUTDOWN:
error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "zfs shutdown"));
goto out;
case ENOENT:
/* Blocking error case should not occur */
if (!(flags & ZEVENT_NONBLOCK))
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
case ENOMEM:
zcmd_expand_dst_nvlist(hdl, &zc);
goto retry;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
}
}
error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
if (error != 0)
goto out;
*dropped = (int)zc.zc_cookie;
out:
zcmd_free_nvlists(&zc);
return (error);
}
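/*
* Example (illustrative sketch; assumes 'hdl' from libzfs_init() and a
* dedicated 'zevent_fd' opened on ZFS_DEV): consume all pending events
* without blocking:
*
*	nvlist_t *nv;
*	int dropped;
*	for (;;) {
*		if (zpool_events_next(hdl, &nv, &dropped,
*		    ZEVENT_NONBLOCK, zevent_fd) != 0 || nv == NULL)
*			break;
*		if (dropped > 0)
*			(void) fprintf(stderr, "missed %d events\n",
*			    dropped);
*		(void) nvlist_print(stdout, nv);
*		nvlist_free(nv);
*	}
*/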
/*
* Clear all events.
*/
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
zfs_cmd_t zc = {"\0"};
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
return (zpool_standard_error(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot clear events")));
if (count != NULL)
*count = (int)zc.zc_cookie; /* # of events cleared */
return (0);
}
/*
* Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
* the passed zevent_fd file handle. On success zero is returned,
* otherwise -1 is returned and hdl->libzfs_error is set to the errno.
*/
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
zc.zc_guid = eid;
zc.zc_cleanup_fd = zevent_fd;
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
switch (errno) {
case ENOENT:
error = zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
case ENOMEM:
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
}
}
return (error);
}
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len, boolean_t always_unmounted)
{
zfs_cmd_t zc = {"\0"};
boolean_t mounted = B_FALSE;
char *mntpnt = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
if (dsobj == 0) {
/* special case for the MOS */
(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
(longlong_t)obj);
return;
}
/* get the dataset's name */
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_obj = dsobj;
if (zfs_ioctl(zhp->zpool_hdl,
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
/* just write out a path of two object numbers */
(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
(longlong_t)dsobj, (longlong_t)obj);
return;
}
(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
/* find out if the dataset is mounted */
mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
&mntpnt);
/* get the corrupted object's path */
(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
zc.zc_obj = obj;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
&zc) == 0) {
if (mounted) {
(void) snprintf(pathname, len, "%s%s", mntpnt,
zc.zc_value);
} else {
(void) snprintf(pathname, len, "%s:%s",
dsname, zc.zc_value);
}
} else {
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
(longlong_t)obj);
}
free(mntpnt);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
/*
* Wait while the specified activity is in progress in the pool.
*/
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
boolean_t missing;
int error = zpool_wait_status(zhp, activity, &missing, NULL);
if (missing) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
return (ENOENT);
} else {
return (error);
}
}
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent pools are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zpool's wait cmd). In that
* scenario, a pool being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the pool doesn't
* exist.
*/
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait(zhp->zpool_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
}
return (error);
}
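/*
* Example (illustrative sketch; assumes an open pool handle 'zhp'): poll
* an activity the way "zpool wait" does, treating a pool that disappears
* mid-wait as a normal termination:
*
*	boolean_t missing = B_FALSE, waited = B_TRUE;
*	while (waited) {
*		if (zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB,
*		    &missing, &waited) != 0 || missing)
*			break;
*	}
*/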
int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
int error = lzc_set_bootenv(zhp->zpool_name, envmap);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error setting bootenv in pool '%s'"), zhp->zpool_name);
}
return (error);
}
int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
nvlist_t *nvl;
int error;
nvl = NULL;
error = lzc_get_bootenv(zhp->zpool_name, &nvl);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error getting bootenv in pool '%s'"), zhp->zpool_name);
} else {
*nvlp = nvl;
}
return (error);
}
/*
* Attempt to read and parse feature file(s) (from "compatibility" property).
* Files contain zpool feature names, comma or whitespace-separated.
* Comments (# character to next newline) are discarded.
*
* Arguments:
* compatibility : string containing feature filenames
* features : either NULL or pointer to array of boolean
* report : either NULL or pointer to string buffer
* rlen : length of "report" buffer
*
* compatibility is NULL (unset), "", "off", "legacy", or list of
* comma-separated filenames. filenames should either be absolute,
* or relative to:
* 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
* 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
* (Unset), "" or "off" => enable all features
* "legacy" => disable all features
*
* Any feature names read from files which match unames in spa_feature_table
* will have the corresponding boolean set in the features array (if non-NULL).
* If more than one feature set is specified, only features present in *all* of
* them will be set.
*
* "report" if not NULL will be populated with a suitable status message.
*
* Return values:
* ZPOOL_COMPATIBILITY_OK : files read and parsed ok
* ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
* ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
* ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
* ZPOOL_COMPATIBILITY_NOFILES : no feature files found
*/
zpool_compat_status_t
zpool_load_compat(const char *compat, boolean_t *features, char *report,
size_t rlen)
{
int sdirfd, ddirfd, featfd;
struct stat fs;
char *fc;
char *ps, *ls, *ws;
char *file, *line, *word;
char l_compat[ZFS_MAXPROPLEN];
boolean_t ret_nofiles = B_TRUE;
boolean_t ret_badfile = B_FALSE;
boolean_t ret_badtoken = B_FALSE;
boolean_t ret_warntoken = B_FALSE;
/* special cases (unset), "" and "off" => enable all features */
if (compat == NULL || compat[0] == '\0' ||
strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
if (report != NULL)
strlcpy(report, gettext("all features enabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/* Final special case "legacy" => disable all features */
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_FALSE;
if (report != NULL)
strlcpy(report, gettext("all features disabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/*
* Start with all true; will be ANDed with results from each file
*/
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
char err_badfile[ZFS_MAXPROPLEN] = "";
char err_badtoken[ZFS_MAXPROPLEN] = "";
/*
* We ignore errors from the directory open() calls, as the
* fds are only needed when a filename is relative, and that
* is checked during the openat().
*/
/* O_PATH safer than O_RDONLY if system allows it */
#if defined(O_PATH)
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
#else
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
#endif
sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
for (file = strtok_r(l_compat, ",", &ps);
file != NULL;
file = strtok_r(NULL, ",", &ps)) {
boolean_t l_features[SPA_FEATURES];
enum { Z_SYSCONF, Z_DATA } source;
/* try sysconfdir first, then datadir */
source = Z_SYSCONF;
if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
source = Z_DATA;
}
/* File readable and correct size? */
if (featfd < 0 ||
fstat(featfd, &fs) < 0 ||
fs.st_size < 1 ||
fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
(void) close(featfd);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
/* Prefault the file if system allows */
#if defined(MAP_POPULATE)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
#elif defined(MAP_PREFAULT_READ)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
#else
#define ZC_MMAP_FLAGS (MAP_PRIVATE)
#endif
/* private mmap() so we can strtok safely */
fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
ZC_MMAP_FLAGS, featfd, 0);
(void) close(featfd);
/* map ok, and last character == newline? */
if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
(void) munmap((void *) fc, fs.st_size);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
ret_nofiles = B_FALSE;
for (uint_t i = 0; i < SPA_FEATURES; i++)
l_features[i] = B_FALSE;
/* replace final newline with NULL to ensure string ends */
fc[fs.st_size - 1] = '\0';
for (line = strtok_r(fc, "\n", &ls);
line != NULL;
line = strtok_r(NULL, "\n", &ls)) {
/* discard comments */
char *r = strchr(line, '#');
if (r != NULL)
*r = '\0';
for (word = strtok_r(line, ", \t", &ws);
word != NULL;
word = strtok_r(NULL, ", \t", &ws)) {
/* Find matching feature name */
uint_t f;
for (f = 0; f < SPA_FEATURES; f++) {
zfeature_info_t *fi =
&spa_feature_table[f];
if (strcmp(word, fi->fi_uname) == 0) {
l_features[f] = B_TRUE;
break;
}
}
if (f < SPA_FEATURES)
continue;
/* found an unrecognized word */
/* lightly sanitize it */
if (strlen(word) > 32)
word[32] = '\0';
for (char *c = word; *c != '\0'; c++)
if (!isprint(*c))
*c = '?';
strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
if (source == Z_SYSCONF)
ret_badtoken = B_TRUE;
else
ret_warntoken = B_TRUE;
}
}
(void) munmap((void *) fc, fs.st_size);
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] &= l_features[i];
}
(void) close(sdirfd);
(void) close(ddirfd);
/* Return the most serious error */
if (ret_badfile) {
if (report != NULL)
snprintf(report, rlen, gettext("could not read/"
"parse feature file(s): %s"), err_badfile);
return (ZPOOL_COMPATIBILITY_BADFILE);
}
if (ret_nofiles) {
if (report != NULL)
strlcpy(report,
gettext("no valid compatibility files specified"),
rlen);
return (ZPOOL_COMPATIBILITY_NOFILES);
}
if (ret_badtoken) {
if (report != NULL)
snprintf(report, rlen, gettext("invalid feature "
"name(s) in local compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_BADTOKEN);
}
if (ret_warntoken) {
if (report != NULL)
snprintf(report, rlen, gettext("unrecognized feature "
"name(s) in distribution compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_WARNTOKEN);
}
if (report != NULL)
strlcpy(report, gettext("compatibility set ok"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
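/*
* Example (illustrative sketch; 'compat' is assumed to hold the pool's
* "compatibility" property value): validate the feature files and report
* anything worse than an unrecognized distribution-file token:
*
*	boolean_t features[SPA_FEATURES];
*	char report[1024];
*	zpool_compat_status_t st = zpool_load_compat(compat, features,
*	    report, sizeof (report));
*	if (st != ZPOOL_COMPATIBILITY_OK &&
*	    st != ZPOOL_COMPATIBILITY_WARNTOKEN)
*		(void) fprintf(stderr, "%s\n", report);
*/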
static int
zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
{
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
verify(zhp != NULL);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
}
if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
NULL)) == NULL) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "can not find %s in %s"),
vdevname, zhp->zpool_name);
return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
}
*vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
return (0);
}
/*
* Get a vdev property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
nvlist_t *nv;
const char *strval;
uint64_t intval;
zprop_source_t src = ZPROP_SRC_NONE;
if (prop == VDEV_PROP_USERPROP) {
/* user property, prop_name must contain the property name */
assert(prop_name != NULL);
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
/* user prop not found */
return (-1);
}
(void) strlcpy(buf, strval, len);
if (srctype)
*srctype = src;
return (0);
}
if (prop_name == NULL)
prop_name = (char *)vdev_prop_to_name(prop);
switch (vdev_prop_get_type(prop)) {
case PROP_TYPE_STRING:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
if ((strval = vdev_prop_default_string(prop)) == NULL)
strval = "-";
}
(void) strlcpy(buf, strval, len);
break;
case PROP_TYPE_NUMBER:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
}
switch (prop) {
case VDEV_PROP_ASIZE:
case VDEV_PROP_PSIZE:
case VDEV_PROP_SIZE:
case VDEV_PROP_BOOTSIZE:
case VDEV_PROP_ALLOCATED:
case VDEV_PROP_FREE:
case VDEV_PROP_READ_ERRORS:
case VDEV_PROP_WRITE_ERRORS:
case VDEV_PROP_CHECKSUM_ERRORS:
case VDEV_PROP_INITIALIZE_ERRORS:
case VDEV_PROP_OPS_NULL:
case VDEV_PROP_OPS_READ:
case VDEV_PROP_OPS_WRITE:
case VDEV_PROP_OPS_FREE:
case VDEV_PROP_OPS_CLAIM:
case VDEV_PROP_OPS_TRIM:
case VDEV_PROP_BYTES_NULL:
case VDEV_PROP_BYTES_READ:
case VDEV_PROP_BYTES_WRITE:
case VDEV_PROP_BYTES_FREE:
case VDEV_PROP_BYTES_CLAIM:
case VDEV_PROP_BYTES_TRIM:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicenum(intval, buf, len);
}
break;
case VDEV_PROP_EXPANDSZ:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicenum(intval, buf, len);
}
break;
case VDEV_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
case VDEV_PROP_IO_T:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_STATE:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) strlcpy(buf, zpool_state_to_name(intval,
VDEV_AUX_NONE), len);
}
break;
default:
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
}
if (vdev_prop_index_to_string(prop, intval,
(const char **)&strval) != 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
/*
* Get a vdev property value for 'prop_name' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
boolean_t literal)
{
nvlist_t *reqnvl, *reqprops;
nvlist_t *retprops = NULL;
uint64_t vdev_guid = 0;
int ret;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
if (prop != VDEV_PROP_USERPROP) {
/* prop_name overrides prop value */
if (prop_name != NULL)
prop = vdev_name_to_prop(prop_name);
else
prop_name = (char *)vdev_prop_to_name(prop);
assert(prop < VDEV_NUM_PROPS);
}
assert(prop_name != NULL);
if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
nvlist_free(reqnvl);
nvlist_free(reqprops);
return (no_memory(zhp->zpool_hdl));
}
fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
if (ret == 0) {
ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
len, srctype, literal);
} else {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
" %s in %s"), prop_name, vdevname, zhp->zpool_name);
(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
}
nvlist_free(reqnvl);
nvlist_free(reqprops);
nvlist_free(retprops);
return (ret);
}
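/*
* Example (illustrative sketch; assumes 'zhp' and a vdev name such as
* "sda"): fetch a single vdev property as display text:
*
*	char value[ZFS_MAXPROPLEN];
*	zprop_source_t src;
*	if (zpool_get_vdev_prop(zhp, "sda", VDEV_PROP_STATE, NULL,
*	    value, sizeof (value), &src, B_FALSE) == 0)
*		(void) printf("state: %s\n", value);
*/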
/*
* Get all vdev properties
*/
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
nvlist_t **outnvl)
{
nvlist_t *nvl = NULL;
uint64_t vdev_guid = 0;
int ret;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);
nvlist_free(nvl);
if (ret) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
" %s in %s"), vdevname, zhp->zpool_name);
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
}
return (ret);
}
/*
* Set vdev property
*/
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
const char *propname, const char *propval)
{
int ret;
nvlist_t *nvl = NULL;
nvlist_t *outnvl = NULL;
nvlist_t *props;
nvlist_t *realprops;
prop_flags_t flags = { 0 };
uint64_t version;
uint64_t vdev_guid;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
if (nvlist_add_string(props, propname, propval) != 0) {
nvlist_free(props);
return (no_memory(zhp->zpool_hdl));
}
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
propname, vdevname, zhp->zpool_name);
flags.vdevprop = 1;
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
nvlist_free(props);
nvlist_free(nvl);
return (-1);
}
nvlist_free(props);
props = realprops;
fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);
nvlist_free(props);
nvlist_free(nvl);
nvlist_free(outnvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
return (ret);
}
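/*
* Example (illustrative sketch; assumes 'zhp' and a vdev name such as
* "sda"); on failure the error has already been reported through
* zpool_standard_error():
*
*	if (zpool_set_vdev_prop(zhp, "sda", "checksum_n", "20") != 0)
*		return (1);
*/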
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index 87a30f54fea8..e9bc78aa8d39 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -1,5564 +1,5625 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019 Datto Inc.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/avl.h>
#include <sys/debug.h>
#include <sys/stat.h>
#include <pthread.h>
#include <umem.h>
#include <time.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include <cityhash.h>
#include <zlib.h>
#include <sys/zio_checksum.h>
#include <sys/dsl_crypt.h>
#include <sys/ddt.h>
#include <sys/socket.h>
#include <sys/sha2.h>
static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **,
const char *, nvlist_t *);
static int guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name);
static int guid_to_name(libzfs_handle_t *, const char *,
uint64_t, boolean_t, char *);
typedef struct progress_arg {
zfs_handle_t *pa_zhp;
int pa_fd;
boolean_t pa_parsable;
boolean_t pa_estimate;
int pa_verbosity;
boolean_t pa_astitle;
boolean_t pa_progress;
uint64_t pa_size;
} progress_arg_t;
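/*
* Write a single replay record (and optional payload) to 'outfd',
* folding both into the caller's running fletcher-4 stream checksum
* 'zc'. For every record except DRR_BEGIN, the checksum accumulated so
* far is stored in the record itself before it is written out.
*/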
static int
dump_record(dmu_replay_record_t *drr, void *payload, size_t payload_len,
zio_cksum_t *zc, int outfd)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
fletcher_4_incremental_native(drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
if (drr->drr_type != DRR_BEGIN) {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
drr_checksum.drr_checksum));
drr->drr_u.drr_checksum.drr_checksum = *zc;
}
fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), zc);
if (write(outfd, drr, sizeof (*drr)) == -1)
return (errno);
if (payload_len != 0) {
fletcher_4_incremental_native(payload, payload_len, zc);
if (write(outfd, payload, payload_len) == -1)
return (errno);
}
return (0);
}
/*
* Routines for dealing with the AVL tree of fs-nvlists
*/
typedef struct fsavl_node {
avl_node_t fn_node;
nvlist_t *fn_nvfs;
const char *fn_snapname;
uint64_t fn_guid;
} fsavl_node_t;
static int
fsavl_compare(const void *arg1, const void *arg2)
{
const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1;
const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2;
return (TREE_CMP(fn1->fn_guid, fn2->fn_guid));
}
/*
* Given the GUID of a snapshot, find its containing filesystem and
* (optionally) name.
*/
static nvlist_t *
fsavl_find(avl_tree_t *avl, uint64_t snapguid, const char **snapname)
{
fsavl_node_t fn_find;
fsavl_node_t *fn;
fn_find.fn_guid = snapguid;
fn = avl_find(avl, &fn_find, NULL);
if (fn) {
if (snapname)
*snapname = fn->fn_snapname;
return (fn->fn_nvfs);
}
return (NULL);
}
static void
fsavl_destroy(avl_tree_t *avl)
{
fsavl_node_t *fn;
void *cookie;
if (avl == NULL)
return;
cookie = NULL;
while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
free(fn);
avl_destroy(avl);
free(avl);
}
/*
* Given an nvlist, produce an avl tree of snapshots, ordered by guid
*/
static avl_tree_t *
fsavl_create(nvlist_t *fss)
{
avl_tree_t *fsavl;
nvpair_t *fselem = NULL;
if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
return (NULL);
avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
offsetof(fsavl_node_t, fn_node));
while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
nvlist_t *nvfs, *snaps;
nvpair_t *snapelem = NULL;
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
while ((snapelem =
nvlist_next_nvpair(snaps, snapelem)) != NULL) {
fsavl_node_t *fn;
if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
fsavl_destroy(fsavl);
return (NULL);
}
fn->fn_nvfs = nvfs;
fn->fn_snapname = nvpair_name(snapelem);
fn->fn_guid = fnvpair_value_uint64(snapelem);
/*
* Note: if there are multiple snaps with the
* same GUID, we ignore all but one.
*/
avl_index_t where = 0;
if (avl_find(fsavl, fn, &where) == NULL)
avl_insert(fsavl, fn, where);
else
free(fn);
}
}
return (fsavl);
}
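/*
* Example (illustrative sketch): typical lifecycle of the snapshot index
* built from the "fss" nvlist assembled by gather_nvlist():
*
*	avl_tree_t *fsavl = fsavl_create(fss);
*	if (fsavl != NULL) {
*		const char *snapname;
*		nvlist_t *nvfs = fsavl_find(fsavl, guid, &snapname);
*		if (nvfs != NULL)
*			(void) printf("%s@%s\n", fnvlist_lookup_string(
*			    nvfs, "name"), snapname);
*		fsavl_destroy(fsavl);
*	}
*/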
/*
* Routines for dealing with the giant nvlist of fs-nvlists, etc.
*/
typedef struct send_data {
/*
* assigned inside every recursive call,
* restored from *_save on return:
*
* guid of fromsnap snapshot in parent dataset
* txg of fromsnap snapshot in current dataset
* txg of tosnap snapshot in current dataset
*/
uint64_t parent_fromsnap_guid;
uint64_t fromsnap_txg;
uint64_t tosnap_txg;
/* the nvlists get accumulated during depth-first traversal */
nvlist_t *parent_snaps;
nvlist_t *fss;
nvlist_t *snapprops;
nvlist_t *snapholds; /* user holds */
/* send-receive configuration, does not change during traversal */
const char *fsname;
const char *fromsnap;
const char *tosnap;
boolean_t recursive;
boolean_t raw;
boolean_t doall;
boolean_t replicate;
boolean_t skipmissing;
boolean_t verbose;
boolean_t backup;
boolean_t seenfrom;
boolean_t seento;
boolean_t holds; /* were holds requested with send -h */
boolean_t props;
/*
* The header nvlist is of the following format:
* {
* "tosnap" -> string
* "fromsnap" -> string (if incremental)
* "fss" -> {
* id -> {
*
* "name" -> string (full name; for debugging)
* "parentfromsnap" -> number (guid of fromsnap in parent)
*
* "props" -> { name -> value (only if set here) }
* "snaps" -> { name (lastname) -> number (guid) }
* "snapprops" -> { name (lastname) -> { name -> value } }
* "snapholds" -> { name (lastname) -> { holdname -> crtime } }
*
* "origin" -> number (guid) (if clone)
* "is_encroot" -> boolean
* "sent" -> boolean (not on-disk)
* }
* }
* }
*
*/
} send_data_t;
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);
/*
* Collect guid, valid props, optionally holds, etc. of a snapshot.
* This interface is intended for use as a zfs_iter_snapshots_v2_sorted visitor.
*/
static int
send_iterate_snap(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
boolean_t isfromsnap, istosnap, istosnapwithnofrom;
char *snapname;
const char *from = sd->fromsnap;
const char *to = sd->tosnap;
snapname = strrchr(zhp->zfs_name, '@');
assert(snapname != NULL);
++snapname;
isfromsnap = (from != NULL && strcmp(from, snapname) == 0);
istosnap = (to != NULL && strcmp(to, snapname) == 0);
istosnapwithnofrom = (istosnap && from == NULL);
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping snapshot %s because it was created "
"after the destination snapshot (%s)\n"),
zhp->zfs_name, to);
}
zfs_close(zhp);
return (0);
}
fnvlist_add_uint64(sd->parent_snaps, snapname, guid);
/*
* NB: if there is no fromsnap here (it's a newly created fs in
* an incremental replication), we will substitute the tosnap.
*/
if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap))
sd->parent_fromsnap_guid = guid;
if (!sd->recursive) {
/*
* To allow a doall stream to work properly
* with a NULL fromsnap
*/
if (sd->doall && from == NULL && !sd->seenfrom)
sd->seenfrom = B_TRUE;
if (!sd->seenfrom && isfromsnap) {
sd->seenfrom = B_TRUE;
zfs_close(zhp);
return (0);
}
if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) {
zfs_close(zhp);
return (0);
}
if (istosnap)
sd->seento = B_TRUE;
}
nvlist_t *nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(sd->snapprops, snapname, nv);
fnvlist_free(nv);
if (sd->holds) {
nvlist_t *holds;
if (lzc_get_holds(zhp->zfs_name, &holds) == 0) {
fnvlist_add_nvlist(sd->snapholds, snapname, holds);
fnvlist_free(holds);
}
}
zfs_close(zhp);
return (0);
}
/*
* Collect all valid props from the handle snap into an nvlist.
*/
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv)
{
nvlist_t *props;
if (received_only)
props = zfs_get_recvd_props(zhp);
else
props = zhp->zfs_props;
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
zfs_prop_t prop = zfs_name_to_prop(propname);
if (!zfs_prop_user(propname)) {
/*
* Realistically, this should never happen. However,
* we want the ability to add DSL properties without
* needing to make incompatible version changes. We
* need to ignore unknown properties to allow older
* software to still send datasets containing these
* properties, with the unknown properties elided.
*/
if (prop == ZPROP_INVAL)
continue;
if (zfs_prop_readonly(prop))
continue;
}
nvlist_t *propnv = fnvpair_value_nvlist(elem);
boolean_t isspacelimit = (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION);
if (isspacelimit && zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
continue;
const char *source;
if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) == 0) {
if (strcmp(source, zhp->zfs_name) != 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
} else {
/*
* May have no source before SPA_VERSION_RECVD_PROPS,
* but is still modifiable.
*/
if (!isspacelimit)
continue;
}
if (zfs_prop_user(propname) ||
zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
const char *value;
value = fnvlist_lookup_string(propnv, ZPROP_VALUE);
fnvlist_add_string(nv, propname, value);
} else {
uint64_t value;
value = fnvlist_lookup_uint64(propnv, ZPROP_VALUE);
fnvlist_add_uint64(nv, propname, value);
}
}
}
/*
* Returns the snapshot's guid, or 0 if the snapshot
* does not exist.
*/
static uint64_t
get_snap_guid(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[MAXPATHLEN + 1];
uint64_t guid = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (guid);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
guid = zfs_prop_get_int(zhp, ZFS_PROP_GUID);
zfs_close(zhp);
}
return (guid);
}
/*
* Returns the snapshot's creation txg, or 0 if the
* snapshot does not exist.
*/
static uint64_t
get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t txg = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (txg);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) {
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG);
zfs_close(zhp);
}
}
return (txg);
}
/*
* Recursively generate nvlists describing datasets. See comment
* for the data structure send_data_t above for description of contents
* of the nvlist.
*/
static int
send_iterate_fs(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
nvlist_t *nvfs = NULL, *nv = NULL;
int rv = 0;
uint64_t min_txg = 0, max_txg = 0;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t fromsnap_txg, tosnap_txg;
char guidstring[64];
/* These fields are restored on return from a recursive call. */
uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
uint64_t fromsnap_txg_save = sd->fromsnap_txg;
uint64_t tosnap_txg_save = sd->tosnap_txg;
fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap);
if (fromsnap_txg != 0)
sd->fromsnap_txg = fromsnap_txg;
tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap);
if (tosnap_txg != 0)
sd->tosnap_txg = tosnap_txg;
/*
* On the send side, if the current dataset does not have tosnap,
* perform two additional checks:
*
* - Skip sending the current dataset if it was created later than
* the parent tosnap.
* - Return error if the current dataset was created earlier than
* the parent tosnap, unless --skip-missing specified. Then
* just print a warning.
*/
if (sd->tosnap != NULL && tosnap_txg == 0) {
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping dataset %s: snapshot %s does "
"not exist\n"), zhp->zfs_name, sd->tosnap);
}
} else if (sd->skipmissing) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: skipping dataset %s and its children:"
" snapshot %s does not exist\n"),
zhp->zfs_name, sd->tosnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s%s: snapshot %s@%s does not "
"exist\n"), sd->fsname, sd->tosnap, sd->recursive ?
dgettext(TEXT_DOMAIN, " recursively") : "",
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOENT;
}
goto out;
}
nvfs = fnvlist_alloc();
fnvlist_add_string(nvfs, "name", zhp->zfs_name);
fnvlist_add_uint64(nvfs, "parentfromsnap", sd->parent_fromsnap_guid);
if (zhp->zfs_dmustats.dds_origin[0] != '\0') {
zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
if (origin == NULL) {
rv = -1;
goto out;
}
fnvlist_add_uint64(nvfs, "origin",
origin->zfs_dmustats.dds_guid);
zfs_close(origin);
}
/* Iterate over props. */
if (sd->props || sd->backup || sd->recursive) {
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(nvfs, "props", nv);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
boolean_t encroot;
/* Determine if this dataset is an encryption root. */
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) {
rv = -1;
goto out;
}
if (encroot)
fnvlist_add_boolean(nvfs, "is_encroot");
/*
* Encrypted datasets can only be sent with properties if
* the raw flag is specified because the receive side doesn't
* currently have a mechanism for recursively asking the user
* for new encryption parameters.
*/
if (!sd->raw) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s: encrypted dataset %s may not "
"be sent with properties without the raw flag\n"),
sd->fsname, sd->tosnap, zhp->zfs_name);
rv = -1;
goto out;
}
}
/*
* Iterate over snaps, and set sd->parent_fromsnap_guid.
*
* If this is a "doall" send, a replicate send or we're just trying
* to gather a list of previous snapshots, iterate through all the
* snaps in the txg range. Otherwise just look at the one we're
* interested in.
*/
sd->parent_fromsnap_guid = 0;
sd->parent_snaps = fnvlist_alloc();
sd->snapprops = fnvlist_alloc();
if (sd->holds)
sd->snapholds = fnvlist_alloc();
if (sd->doall || sd->replicate || sd->tosnap == NULL) {
if (!sd->replicate && fromsnap_txg != 0)
min_txg = fromsnap_txg;
if (!sd->replicate && tosnap_txg != 0)
max_txg = tosnap_txg;
(void) zfs_iter_snapshots_sorted_v2(zhp, 0, send_iterate_snap,
sd, min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
(void) snprintf(snapname, sizeof (snapname), "%s@%s",
zhp->zfs_name, sd->tosnap);
if (sd->fromsnap != NULL)
sd->seenfrom = B_TRUE;
snap = zfs_open(zhp->zfs_hdl, snapname, ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
(void) send_iterate_snap(snap, sd);
}
fnvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps);
fnvlist_free(sd->parent_snaps);
fnvlist_add_nvlist(nvfs, "snapprops", sd->snapprops);
fnvlist_free(sd->snapprops);
if (sd->holds) {
fnvlist_add_nvlist(nvfs, "snapholds", sd->snapholds);
fnvlist_free(sd->snapholds);
}
/* Do not allow the size of the properties list to exceed the limit */
if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"warning: cannot send %s@%s: the size of the list of "
"snapshots and properties is too large to be received "
"successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOSPC;
goto out;
}
/* Add this fs to nvlist. */
(void) snprintf(guidstring, sizeof (guidstring),
"0x%llx", (longlong_t)guid);
fnvlist_add_nvlist(sd->fss, guidstring, nvfs);
/* Iterate over children. */
if (sd->recursive)
rv = zfs_iter_filesystems_v2(zhp, 0, send_iterate_fs, sd);
out:
/* Restore saved fields. */
sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
sd->fromsnap_txg = fromsnap_txg_save;
sd->tosnap_txg = tosnap_txg_save;
fnvlist_free(nv);
fnvlist_free(nvfs);
zfs_close(zhp);
return (rv);
}
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t doall,
boolean_t replicate, boolean_t skipmissing, boolean_t verbose,
boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
avl_tree_t **avlp)
{
zfs_handle_t *zhp;
send_data_t sd = { 0 };
int error;
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (EZFS_BADTYPE);
sd.fss = fnvlist_alloc();
sd.fsname = fsname;
sd.fromsnap = fromsnap;
sd.tosnap = tosnap;
sd.recursive = recursive;
sd.raw = raw;
sd.doall = doall;
sd.replicate = replicate;
sd.skipmissing = skipmissing;
sd.verbose = verbose;
sd.backup = backup;
sd.holds = holds;
sd.props = props;
if ((error = send_iterate_fs(zhp, &sd)) != 0) {
fnvlist_free(sd.fss);
if (avlp != NULL)
*avlp = NULL;
*nvlp = NULL;
return (error);
}
if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
fnvlist_free(sd.fss);
*nvlp = NULL;
return (EZFS_NOMEM);
}
*nvlp = sd.fss;
return (0);
}
/*
* Routines specific to "zfs send"
*/
typedef struct send_dump_data {
/* these are all just the short snapname (the part after the @) */
const char *fromsnap;
const char *tosnap;
char prevsnap[ZFS_MAX_DATASET_NAME_LEN];
uint64_t prevsnap_obj;
boolean_t seenfrom, seento, replicate, doall, fromorigin;
boolean_t dryrun, parsable, progress, embed_data, std_out;
boolean_t large_block, compress, raw, holds;
boolean_t progressastitle;
int outfd;
boolean_t err;
nvlist_t *fss;
nvlist_t *snapholds;
avl_tree_t *fsavl;
snapfilter_cb_t *filter_cb;
void *filter_cb_arg;
nvlist_t *debugnv;
char holdtag[ZFS_MAX_DATASET_NAME_LEN];
int cleanup_fd;
int verbosity;
uint64_t size;
} send_dump_data_t;
static int
zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from,
enum lzc_send_flags flags, uint64_t *spacep)
{
assert(snapname != NULL);
int error = lzc_send_space(snapname, from, flags, spacep);
if (error == 0)
return (0);
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot estimate space for '%s'"), snapname);
libzfs_handle_t *hdl = zhp->zfs_hdl;
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, snapname,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
snapname);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(error));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, error, errbuf));
}
}
/*
* Dumps a backup of the given snapshot (incremental from fromsnap if it's not
* NULL) to the file descriptor specified by outfd.
*/
static int
dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
boolean_t fromorigin, int outfd, enum lzc_send_flags flags,
nvlist_t *debugnv)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *thisdbg;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
assert(fromsnap_obj == 0 || !fromorigin);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = outfd;
zc.zc_obj = fromorigin;
zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zc.zc_fromobj = fromsnap_obj;
zc.zc_flags = flags;
if (debugnv != NULL) {
thisdbg = fnvlist_alloc();
if (fromsnap != NULL && fromsnap[0] != '\0')
fnvlist_add_string(thisdbg, "fromsnap", fromsnap);
}
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
char errbuf[ERRBUFLEN];
int error = errno;
(void) snprintf(errbuf, sizeof (errbuf), "%s '%s'",
dgettext(TEXT_DOMAIN, "warning: cannot send"),
zhp->zfs_name);
if (debugnv != NULL) {
fnvlist_add_uint64(thisdbg, "error", error);
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
}
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, zc.zc_name,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (@%s) does not exist"),
zc.zc_value);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
if (debugnv != NULL) {
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
}
return (0);
}
static void
gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
/*
* zfs_send() only sets snapholds for sends that need them,
* e.g. replication and doall.
*/
if (sdd->snapholds == NULL)
return;
fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag);
}
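/*
 * Query ZFS_IOC_SEND_PROGRESS for the number of bytes written and
 * blocks visited so far by the send in flight on fd.
 */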
int
zfs_send_progress(zfs_handle_t *zhp, int fd, uint64_t *bytes_written,
uint64_t *blocks_visited)
{
zfs_cmd_t zc = {"\0"};
if (bytes_written != NULL)
*bytes_written = 0;
if (blocks_visited != NULL)
*blocks_visited = 0;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = fd;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
return (errno);
if (bytes_written != NULL)
*bytes_written = zc.zc_cookie;
if (blocks_visited != NULL)
*blocks_visited = zc.zc_objset_type;
return (0);
}
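+/*
+ * Set from the signal handler to record whether the latest wakeup came
+ * from the periodic timer (SI_TIMER) or from a user-sent SIGUSR1/SIGINFO,
+ * so the progress loop can emit a status line on demand even when
+ * periodic progress reporting was not requested.
+ */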
+static volatile boolean_t send_progress_thread_signal_duetotimer;
+static void
+send_progress_thread_act(int sig, siginfo_t *info, void *ucontext)
+{
+ (void) sig, (void) ucontext;
+ send_progress_thread_signal_duetotimer = info->si_code == SI_TIMER;
+}
+
+struct timer_desirability {
+ timer_t timer;
+ boolean_t desired;
+};
+static void
+timer_delete_cleanup(void *timer)
+{
+ struct timer_desirability *td = timer;
+ if (td->desired)
+ timer_delete(td->timer);
+}
+
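+/*
+ * After spawning the progress thread, the parent blocks SIGUSR1 (and
+ * SIGINFO, where available) so these signals are delivered only to the
+ * progress thread, which waits for them in pause().  The previous mask
+ * is saved in *old and restored by send_progress_thread_exit().
+ */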
+#ifdef SIGINFO
+#define SEND_PROGRESS_THREAD_PARENT_BLOCK_SIGINFO sigaddset(&new, SIGINFO)
+#else
+#define SEND_PROGRESS_THREAD_PARENT_BLOCK_SIGINFO
+#endif
+#define SEND_PROGRESS_THREAD_PARENT_BLOCK(old) { \
+ sigset_t new; \
+ sigemptyset(&new); \
+ sigaddset(&new, SIGUSR1); \
+ SEND_PROGRESS_THREAD_PARENT_BLOCK_SIGINFO; \
+ pthread_sigmask(SIG_BLOCK, &new, old); \
+}
+
static void *
send_progress_thread(void *arg)
{
progress_arg_t *pa = arg;
zfs_handle_t *zhp = pa->pa_zhp;
uint64_t bytes;
uint64_t blocks;
uint64_t total = pa->pa_size / 100;
char buf[16];
time_t t;
struct tm tm;
int err;
+ const struct sigaction signal_action =
+ {.sa_sigaction = send_progress_thread_act, .sa_flags = SA_SIGINFO};
+ struct sigevent timer_cfg =
+ {.sigev_notify = SIGEV_SIGNAL, .sigev_signo = SIGUSR1};
+ const struct itimerspec timer_time =
+ {.it_value = {.tv_sec = 1}, .it_interval = {.tv_sec = 1}};
+ struct timer_desirability timer = {};
+
+ sigaction(SIGUSR1, &signal_action, NULL);
+#ifdef SIGINFO
+ sigaction(SIGINFO, &signal_action, NULL);
+#endif
+
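+	/*
+	 * Arm a one-second periodic CLOCK_MONOTONIC timer that delivers
+	 * SIGUSR1, waking the pause() below once per second.  The timer
+	 * is created only when periodic progress output or a
+	 * process-title update was requested.
+	 */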
+ if ((timer.desired = pa->pa_progress || pa->pa_astitle)) {
+ if (timer_create(CLOCK_MONOTONIC, &timer_cfg, &timer.timer))
+ return ((void *)(uintptr_t)errno);
+ (void) timer_settime(timer.timer, 0, &timer_time, NULL);
+ }
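+	/*
+	 * pthread_cleanup_push()/pop() must pair lexically.  The pop is
+	 * never reached directly (the loop only exits via pthread_exit()
+	 * or cancellation), but both of those paths run
+	 * timer_delete_cleanup() to dispose of the timer.
+	 */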
+ pthread_cleanup_push(timer_delete_cleanup, &timer);
+
if (!pa->pa_parsable && pa->pa_progress) {
(void) fprintf(stderr,
"TIME %s %sSNAPSHOT %s\n",
pa->pa_estimate ? "BYTES" : " SENT",
pa->pa_verbosity >= 2 ? " BLOCKS " : "",
zhp->zfs_name);
}
/*
	 * Print the progress from ZFS_IOC_SEND_PROGRESS on each timer tick,
	 * and on demand when SIGUSR1/SIGINFO is received.
*/
for (;;) {
- (void) sleep(1);
+ pause();
if ((err = zfs_send_progress(zhp, pa->pa_fd, &bytes,
&blocks)) != 0) {
if (err == EINTR || err == ENOENT)
- return ((void *)0);
- return ((void *)(uintptr_t)err);
+ err = 0;
+ pthread_exit(((void *)(uintptr_t)err));
}
(void) time(&t);
localtime_r(&t, &tm);
if (pa->pa_astitle) {
char buf_bytes[16];
char buf_size[16];
int pct;
zfs_nicenum(bytes, buf_bytes, sizeof (buf_bytes));
zfs_nicenum(pa->pa_size, buf_size, sizeof (buf_size));
pct = (total > 0) ? bytes / total : 100;
zfs_setproctitle("sending %s (%d%%: %s/%s)",
zhp->zfs_name, MIN(pct, 100), buf_bytes, buf_size);
}
if (pa->pa_verbosity >= 2 && pa->pa_parsable) {
(void) fprintf(stderr,
"%02d:%02d:%02d\t%llu\t%llu\t%s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
(u_longlong_t)bytes, (u_longlong_t)blocks,
zhp->zfs_name);
} else if (pa->pa_verbosity >= 2) {
zfs_nicenum(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"%02d:%02d:%02d %5s %8llu %s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
buf, (u_longlong_t)blocks, zhp->zfs_name);
} else if (pa->pa_parsable) {
(void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
(u_longlong_t)bytes, zhp->zfs_name);
- } else if (pa->pa_progress) {
+ } else if (pa->pa_progress ||
+ !send_progress_thread_signal_duetotimer) {
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
buf, zhp->zfs_name);
}
}
+ pthread_cleanup_pop(B_TRUE);
}
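/*
 * Cancel and reap the progress thread, then restore the signal mask
 * saved by SEND_PROGRESS_THREAD_PARENT_BLOCK().  Returns B_TRUE, with a
 * libzfs error set, if the thread exited with a nonzero status.
 */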
static boolean_t
-send_progress_thread_exit(libzfs_handle_t *hdl, pthread_t ptid)
+send_progress_thread_exit(
+ libzfs_handle_t *hdl, pthread_t ptid, sigset_t *oldmask)
{
void *status = NULL;
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
+ pthread_sigmask(SIG_SETMASK, oldmask, NULL);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED)
return (zfs_standard_error(hdl, error,
dgettext(TEXT_DOMAIN, "progress thread exited nonzero")));
else
return (B_FALSE);
}
static void
send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
uint64_t size, boolean_t parsable)
{
if (parsable) {
if (fromsnap != NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"incremental\t%s\t%s"), fromsnap, tosnap);
} else {
/*
 * Workaround for deficiencies in GCC 12+ with UBSan enabled.
*
* GCC 12+ invoked with -fsanitize=undefined incorrectly reports the code
* below as violating -Wformat-overflow.
*/
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-overflow"
#endif
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full\t%s"), tosnap);
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic pop
#endif
}
(void) fprintf(fout, "\t%llu", (longlong_t)size);
} else {
if (fromsnap != NULL) {
if (strchr(fromsnap, '@') == NULL &&
strchr(fromsnap, '#') == NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from @%s to %s"), fromsnap, tosnap);
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from %s to %s"), fromsnap, tosnap);
}
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full send of %s"), tosnap);
}
if (size != 0) {
char buf[16];
zfs_nicebytes(size, buf, sizeof (buf));
/*
 * Workaround for deficiencies in GCC 12+ with UBSan enabled.
*
* GCC 12+ invoked with -fsanitize=undefined incorrectly reports the code
* below as violating -Wformat-overflow.
*/
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-overflow"
#endif
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
" estimated size is %s"), buf);
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic pop
#endif
}
}
(void) fprintf(fout, "\n");
}
/*
* Send a single filesystem snapshot, updating the send dump data.
 * This interface is intended for use as a zfs_iter_snapshots_sorted_v2()
 * visitor.
*/
static int
dump_snapshot(zfs_handle_t *zhp, void *arg)
{
send_dump_data_t *sdd = arg;
progress_arg_t pa = { 0 };
pthread_t tid;
char *thissnap;
enum lzc_send_flags flags = 0;
int err;
boolean_t isfromsnap, istosnap, fromorigin;
boolean_t exclude = B_FALSE;
FILE *fout = sdd->std_out ? stdout : stderr;
err = 0;
thissnap = strchr(zhp->zfs_name, '@') + 1;
isfromsnap = (sdd->fromsnap != NULL &&
strcmp(sdd->fromsnap, thissnap) == 0);
if (!sdd->seenfrom && isfromsnap) {
gather_holds(zhp, sdd);
sdd->seenfrom = B_TRUE;
(void) strlcpy(sdd->prevsnap, thissnap, sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (0);
}
if (sdd->seento || !sdd->seenfrom) {
zfs_close(zhp);
return (0);
}
istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
if (istosnap)
sdd->seento = B_TRUE;
if (sdd->large_block)
flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (sdd->embed_data)
flags |= LZC_SEND_FLAG_EMBED_DATA;
if (sdd->compress)
flags |= LZC_SEND_FLAG_COMPRESS;
if (sdd->raw)
flags |= LZC_SEND_FLAG_RAW;
if (!sdd->doall && !isfromsnap && !istosnap) {
if (sdd->replicate) {
const char *snapname;
nvlist_t *snapprops;
/*
* Filter out all intermediate snapshots except origin
* snapshots needed to replicate clones.
*/
nvlist_t *nvfs = fsavl_find(sdd->fsavl,
zhp->zfs_dmustats.dds_guid, &snapname);
if (nvfs != NULL) {
snapprops = fnvlist_lookup_nvlist(nvfs,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
thissnap);
exclude = !nvlist_exists(snapprops,
"is_clone_origin");
}
} else {
exclude = B_TRUE;
}
}
/*
* If a filter function exists, call it to determine whether
* this snapshot will be sent.
*/
if (exclude || (sdd->filter_cb != NULL &&
sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
/*
* This snapshot is filtered out. Don't send it, and don't
* set prevsnap_obj, so it will be as if this snapshot didn't
* exist, and the next accepted snapshot will be sent as
* an incremental from the last accepted one, or as the
* first (and full) snapshot in the case of a replication,
* non-incremental send.
*/
zfs_close(zhp);
return (0);
}
gather_holds(zhp, sdd);
fromorigin = sdd->prevsnap[0] == '\0' &&
(sdd->fromorigin || sdd->replicate);
if (sdd->verbosity != 0) {
uint64_t size = 0;
char fromds[ZFS_MAX_DATASET_NAME_LEN];
if (sdd->prevsnap[0] != '\0') {
(void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds));
*(strchr(fromds, '@') + 1) = '\0';
(void) strlcat(fromds, sdd->prevsnap, sizeof (fromds));
}
if (zfs_send_space(zhp, zhp->zfs_name,
sdd->prevsnap[0] ? fromds : NULL, flags, &size) == 0) {
send_print_verbose(fout, zhp->zfs_name,
sdd->prevsnap[0] ? sdd->prevsnap : NULL,
size, sdd->parsable);
sdd->size += size;
}
}
if (!sdd->dryrun) {
/*
		 * Spawn a thread to poll ZFS_IOC_SEND_PROGRESS; it reports
		 * periodically when progress reporting was requested, and on
		 * demand when SIGUSR1/SIGINFO arrives.
*/
- if (sdd->progress || sdd->progressastitle) {
+ sigset_t oldmask;
+ {
pa.pa_zhp = zhp;
pa.pa_fd = sdd->outfd;
pa.pa_parsable = sdd->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = sdd->verbosity;
pa.pa_size = sdd->size;
pa.pa_astitle = sdd->progressastitle;
pa.pa_progress = sdd->progress;
if ((err = pthread_create(&tid, NULL,
send_progress_thread, &pa)) != 0) {
zfs_close(zhp);
return (err);
}
+ SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
fromorigin, sdd->outfd, flags, sdd->debugnv);
- if ((sdd->progress || sdd->progressastitle) &&
- send_progress_thread_exit(zhp->zfs_hdl, tid))
+ if (send_progress_thread_exit(zhp->zfs_hdl, tid, &oldmask))
return (-1);
}
(void) strlcpy(sdd->prevsnap, thissnap, sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (err);
}
/*
* Send all snapshots for a filesystem, updating the send dump data.
*/
static int
dump_filesystem(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
int rv = 0;
boolean_t missingfrom = B_FALSE;
zfs_cmd_t zc = {"\0"};
uint64_t min_txg = 0, max_txg = 0;
/*
* Make sure the tosnap exists.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->tosnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
sdd->err = B_TRUE;
return (0);
}
/*
* If this fs does not have fromsnap, and we're doing
* recursive, we need to send a full stream from the
* beginning (or an incremental from the origin if this
* is a clone). If we're doing non-recursive, then let
* them get the error.
*/
if (sdd->replicate && sdd->fromsnap) {
/*
* Make sure the fromsnap exists.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->fromsnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0)
missingfrom = B_TRUE;
}
sdd->seenfrom = sdd->seento = B_FALSE;
sdd->prevsnap[0] = '\0';
sdd->prevsnap_obj = 0;
if (sdd->fromsnap == NULL || missingfrom)
sdd->seenfrom = B_TRUE;
/*
* Iterate through all snapshots and process the ones we will be
* sending. If we only have a "from" and "to" snapshot to deal
* with, we can avoid iterating through all the other snapshots.
*/
if (sdd->doall || sdd->replicate || sdd->tosnap == NULL) {
if (!sdd->replicate) {
if (sdd->fromsnap != NULL) {
min_txg = get_snap_txg(zhp->zfs_hdl,
zhp->zfs_name, sdd->fromsnap);
}
if (sdd->tosnap != NULL) {
max_txg = get_snap_txg(zhp->zfs_hdl,
zhp->zfs_name, sdd->tosnap);
}
}
rv = zfs_iter_snapshots_sorted_v2(zhp, 0, dump_snapshot, sdd,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
/* Dump fromsnap. */
if (!sdd->seenfrom) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->fromsnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = errno;
}
/* Dump tosnap. */
if (rv == 0) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->tosnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = errno;
}
}
if (!sdd->seenfrom) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) does not exist\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
sdd->err = B_TRUE;
} else if (!sdd->seento) {
if (sdd->fromsnap) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) "
"is not earlier than it\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: "
"could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
}
sdd->err = B_TRUE;
}
return (rv);
}
/*
* Send all snapshots for all filesystems in sdd.
*/
static int
dump_filesystems(zfs_handle_t *rzhp, send_dump_data_t *sdd)
{
nvpair_t *fspair;
boolean_t needagain, progress;
if (!sdd->replicate)
return (dump_filesystem(rzhp, sdd));
/* Mark the clone origin snapshots. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *nvfs;
uint64_t origin_guid = 0;
nvfs = fnvpair_value_nvlist(fspair);
(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
if (origin_guid != 0) {
const char *snapname;
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, &snapname);
if (origin_nv != NULL) {
nvlist_t *snapprops;
snapprops = fnvlist_lookup_nvlist(origin_nv,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
snapname);
fnvlist_add_boolean(snapprops,
"is_clone_origin");
}
}
}
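	/*
	 * Sweep the list of filesystems repeatedly, deferring any whose
	 * parent stream or clone origin has not been sent yet, until a
	 * pass defers nothing.
	 */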
again:
needagain = progress = B_FALSE;
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist, *parent_nv;
const char *fsname;
zfs_handle_t *zhp;
int err;
uint64_t origin_guid = 0;
uint64_t parent_guid = 0;
fslist = fnvpair_value_nvlist(fspair);
if (nvlist_lookup_boolean(fslist, "sent") == 0)
continue;
fsname = fnvlist_lookup_string(fslist, "name");
(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
&parent_guid);
if (parent_guid != 0) {
parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
if (!nvlist_exists(parent_nv, "sent")) {
/* Parent has not been sent; skip this one. */
needagain = B_TRUE;
continue;
}
}
if (origin_guid != 0) {
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, NULL);
if (origin_nv != NULL &&
!nvlist_exists(origin_nv, "sent")) {
/*
* Origin has not been sent yet;
* skip this clone.
*/
needagain = B_TRUE;
continue;
}
}
zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
err = dump_filesystem(zhp, sdd);
fnvlist_add_boolean(fslist, "sent");
progress = B_TRUE;
zfs_close(zhp);
if (err)
return (err);
}
if (needagain) {
assert(progress);
goto again;
}
/* Clean out the sent flags in case we reuse this fss. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist;
fslist = fnvpair_value_nvlist(fspair);
(void) nvlist_remove_all(fslist, "sent");
}
return (0);
}
nvlist_t *
zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token)
{
unsigned int version;
int nread, i;
unsigned long long checksum, packed_len;
/*
* Decode token header, which is:
* <token version>-<checksum of payload>-<uncompressed payload length>
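	 * e.g. "1-b3f8a1c25e4d9072-3a4-789c..." where the trailing field
	 * is the hex-encoded, zlib-compressed, packed nvlist (the field
	 * values in this example are illustrative only).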
* Note that the only supported token version is 1.
*/
nread = sscanf(token, "%u-%llx-%llx-",
&version, &checksum, &packed_len);
if (nread != 3) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid format)"));
return (NULL);
}
if (version != ZFS_SEND_RESUME_TOKEN_VERSION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid version %u)"),
version);
return (NULL);
}
/* Convert hexadecimal representation to binary. */
token = strrchr(token, '-') + 1;
int len = strlen(token) / 2;
unsigned char *compressed = zfs_alloc(hdl, len);
for (i = 0; i < len; i++) {
nread = sscanf(token + i * 2, "%2hhx", compressed + i);
if (nread != 1) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt "
"(payload is not hex-encoded)"));
return (NULL);
}
}
/* Verify checksum. */
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, len, &cksum);
if (cksum.zc_word[0] != checksum) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (incorrect checksum)"));
return (NULL);
}
/* Uncompress. */
void *packed = zfs_alloc(hdl, packed_len);
uLongf packed_len_long = packed_len;
if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK ||
packed_len_long != packed_len) {
free(packed);
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (decompression failed)"));
return (NULL);
}
/* Unpack nvlist. */
nvlist_t *nv;
int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP);
free(packed);
free(compressed);
if (error != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (nvlist_unpack failed)"));
return (NULL);
}
return (nv);
}
static enum lzc_send_flags
lzc_flags_from_sendflags(const sendflags_t *flags)
{
enum lzc_send_flags lzc_flags = 0;
if (flags->largeblock)
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data)
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress)
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw)
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved)
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
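/*
 * Estimate the size of the stream that would be sent for zhp via
 * lzc_send_space_resume_redacted(), report it when verbose or parsable
 * output was requested, and return it in *sizep.  A progress thread
 * covers the duration of the estimate itself.
 */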
static int
estimate_size(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
uint64_t resumeobj, uint64_t resumeoff, uint64_t bytes,
const char *redactbook, char *errbuf, uint64_t *sizep)
{
uint64_t size;
FILE *fout = flags->dryrun ? stdout : stderr;
progress_arg_t pa = { 0 };
int err = 0;
pthread_t ptid;
+ sigset_t oldmask;
- if (flags->progress || flags->progressastitle) {
+ {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_TRUE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
+ SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
err = lzc_send_space_resume_redacted(zhp->zfs_name, from,
lzc_flags_from_sendflags(flags), resumeobj, resumeoff, bytes,
redactbook, fd, &size);
*sizep = size;
- if ((flags->progress || flags->progressastitle) &&
- send_progress_thread_exit(zhp->zfs_hdl, ptid))
+ if (send_progress_thread_exit(zhp->zfs_hdl, ptid, &oldmask))
return (-1);
if (!flags->progress && !flags->parsable)
return (err);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
send_print_verbose(fout, zhp->zfs_name, from, size,
flags->parsable);
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n", (longlong_t)size);
} else {
char buf[16];
zfs_nicenum(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
return (0);
}
static boolean_t
redact_snaps_contains(const uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
redact_snaps_equal(const uint64_t *snaps1, uint64_t num_snaps1,
const uint64_t *snaps2, uint64_t num_snaps2)
{
if (num_snaps1 != num_snaps2)
return (B_FALSE);
for (int i = 0; i < num_snaps1; i++) {
if (!redact_snaps_contains(snaps2, num_snaps2, snaps1[i]))
return (B_FALSE);
}
return (B_TRUE);
}
static int
get_bookmarks(const char *path, nvlist_t **bmarksp)
{
nvlist_t *props = fnvlist_alloc();
int error;
fnvlist_add_boolean(props, "redact_complete");
fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
error = lzc_get_bookmarks(path, props, bmarksp);
fnvlist_free(props);
return (error);
}
static nvpair_t *
find_redact_pair(nvlist_t *bmarks, const uint64_t *redact_snap_guids,
int num_redact_snaps)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(bmarks, NULL); pair;
pair = nvlist_next_nvpair(bmarks, pair)) {
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
uint_t len = 0;
uint64_t *bmarksnaps = fnvlist_lookup_uint64_array(vallist,
ZPROP_VALUE, &len);
if (redact_snaps_equal(redact_snap_guids,
num_redact_snaps, bmarksnaps, len)) {
break;
}
}
return (pair);
}
static boolean_t
get_redact_complete(nvpair_t *pair)
{
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark, "redact_complete");
boolean_t complete = fnvlist_lookup_boolean_value(vallist,
ZPROP_VALUE);
return (complete);
}
/*
* Check that the list of redaction snapshots in the bookmark matches the send
* we're resuming, and return whether or not it's complete.
*
* Note that the caller needs to free the contents of *bookname with free() if
* this function returns successfully.
*/
static int
find_redact_book(libzfs_handle_t *hdl, const char *path,
const uint64_t *redact_snap_guids, int num_redact_snaps,
char **bookname)
{
char errbuf[ERRBUFLEN];
nvlist_t *bmarks;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
int error = get_bookmarks(path, &bmarks);
if (error != 0) {
if (error == ESRCH) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"nonexistent redaction bookmark provided"));
} else if (error == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset to be sent no longer exists"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unknown error: %s"), strerror(error));
}
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
nvpair_t *pair = find_redact_pair(bmarks, redact_snap_guids,
num_redact_snaps);
if (pair == NULL) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no appropriate redaction bookmark exists"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
boolean_t complete = get_redact_complete(pair);
if (!complete) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incomplete redaction bookmark provided"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
*bookname = strndup(nvpair_name(pair), ZFS_MAX_DATASET_NAME_LEN);
ASSERT3P(*bookname, !=, NULL);
fnvlist_free(bmarks);
return (0);
}
static enum lzc_send_flags
lzc_flags_from_resume_nvl(nvlist_t *resume_nvl)
{
enum lzc_send_flags lzc_flags = 0;
if (nvlist_exists(resume_nvl, "largeblockok"))
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (nvlist_exists(resume_nvl, "embedok"))
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (nvlist_exists(resume_nvl, "compressok"))
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (nvlist_exists(resume_nvl, "rawok"))
lzc_flags |= LZC_SEND_FLAG_RAW;
if (nvlist_exists(resume_nvl, "savedok"))
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
zfs_send_resume_impl_cb_impl(libzfs_handle_t *hdl, sendflags_t *flags,
int outfd, nvlist_t *resume_nvl)
{
char errbuf[ERRBUFLEN];
const char *toname;
const char *fromname = NULL;
uint64_t resumeobj, resumeoff, toguid, fromguid, bytes;
zfs_handle_t *zhp;
int error = 0;
char name[ZFS_MAX_DATASET_NAME_LEN];
FILE *fout = (flags->verbosity > 0 && flags->dryrun) ? stdout : stderr;
uint64_t *redact_snap_guids = NULL;
int num_redact_snaps = 0;
char *redact_book = NULL;
uint64_t size = 0;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
if (flags->verbosity != 0) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"resume token contents:\n"));
nvlist_print(fout, resume_nvl);
}
if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 ||
nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt"));
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
fromguid = 0;
(void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid);
if (flags->saved) {
(void) strlcpy(name, toname, sizeof (name));
} else {
error = guid_to_name(hdl, toname, toguid, B_FALSE, name);
if (error != 0) {
if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is no longer the same snapshot "
"used in the initial send"), toname);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' used in the initial send no "
"longer exists"), toname);
}
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
}
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unable to access '%s'"), name);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
if (nvlist_lookup_uint64_array(resume_nvl, "book_redact_snaps",
&redact_snap_guids, (uint_t *)&num_redact_snaps) != 0) {
num_redact_snaps = -1;
}
if (fromguid != 0) {
if (guid_to_name_redact_snaps(hdl, toname, fromguid, B_TRUE,
redact_snap_guids, num_redact_snaps, name) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source %#llx no longer exists"),
(longlong_t)fromguid);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
fromname = name;
}
redact_snap_guids = NULL;
if (nvlist_lookup_uint64_array(resume_nvl,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &redact_snap_guids,
(uint_t *)&num_redact_snaps) == 0) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, toname, sizeof (path));
char *at = strchr(path, '@');
ASSERT3P(at, !=, NULL);
*at = '\0';
if ((error = find_redact_book(hdl, path, redact_snap_guids,
num_redact_snaps, &redact_book)) != 0) {
return (error);
}
}
enum lzc_send_flags lzc_flags = lzc_flags_from_sendflags(flags) |
lzc_flags_from_resume_nvl(resume_nvl);
if (flags->verbosity != 0 || flags->progressastitle) {
/*
		 * Some of these may have come from the resume token; set them
		 * here for size-estimate purposes.
*/
sendflags_t tmpflags = *flags;
if (lzc_flags & LZC_SEND_FLAG_LARGE_BLOCK)
tmpflags.largeblock = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_COMPRESS)
tmpflags.compress = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
tmpflags.embed_data = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_RAW)
tmpflags.raw = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_SAVED)
tmpflags.saved = B_TRUE;
error = estimate_size(zhp, fromname, outfd, &tmpflags,
resumeobj, resumeoff, bytes, redact_book, errbuf, &size);
}
if (!flags->dryrun) {
progress_arg_t pa = { 0 };
pthread_t tid;
+ sigset_t oldmask;
/*
		 * Spawn a thread to poll ZFS_IOC_SEND_PROGRESS; it reports
		 * periodically when progress reporting was requested, and on
		 * demand when SIGUSR1/SIGINFO arrives.
*/
- if (flags->progress || flags->progressastitle) {
+ {
pa.pa_zhp = zhp;
pa.pa_fd = outfd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
pa.pa_size = size;
pa.pa_astitle = flags->progressastitle;
pa.pa_progress = flags->progress;
error = pthread_create(&tid, NULL,
send_progress_thread, &pa);
if (error != 0) {
if (redact_book != NULL)
free(redact_book);
zfs_close(zhp);
return (error);
}
+ SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
error = lzc_send_resume_redacted(zhp->zfs_name, fromname, outfd,
lzc_flags, resumeobj, resumeoff, redact_book);
if (redact_book != NULL)
free(redact_book);
- if ((flags->progressastitle || flags->progress) &&
- send_progress_thread_exit(hdl, tid)) {
+ if (send_progress_thread_exit(hdl, tid, &oldmask)) {
zfs_close(zhp);
return (-1);
}
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
zfs_close(zhp);
switch (error) {
case 0:
return (0);
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ESRCH:
if (lzc_exists(zhp->zfs_name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source could not be found"));
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EXDEV:
case ENOENT:
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
} else {
if (redact_book != NULL)
free(redact_book);
}
zfs_close(zhp);
return (error);
}
struct zfs_send_resume_impl {
libzfs_handle_t *hdl;
sendflags_t *flags;
nvlist_t *resume_nvl;
};
static int
zfs_send_resume_impl_cb(int outfd, void *arg)
{
struct zfs_send_resume_impl *zsri = arg;
return (zfs_send_resume_impl_cb_impl(zsri->hdl, zsri->flags, outfd,
zsri->resume_nvl));
}
static int
zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
nvlist_t *resume_nvl)
{
struct zfs_send_resume_impl zsri = {
.hdl = hdl,
.flags = flags,
.resume_nvl = resume_nvl,
};
return (lzc_send_wrapper(zfs_send_resume_impl_cb, outfd, &zsri));
}
int
zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
char errbuf[ERRBUFLEN];
nvlist_t *resume_nvl;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token);
if (resume_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
ret = zfs_send_resume_impl(hdl, flags, outfd, resume_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
int
zfs_send_saved(zfs_handle_t *zhp, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *saved_nvl = NULL, *resume_nvl = NULL;
uint64_t saved_guid = 0, resume_guid = 0;
uint64_t obj = 0, off = 0, bytes = 0;
char token_buf[ZFS_MAXPROPLEN];
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"saved send failed"));
ret = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE);
if (ret != 0)
goto out;
saved_nvl = zfs_send_resume_token_to_nvlist(hdl, token_buf);
if (saved_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
/*
	 * If a resume token is provided, we use the object and offset
* from that instead of the default, which starts from the
* beginning.
*/
if (resume_token != NULL) {
resume_nvl = zfs_send_resume_token_to_nvlist(hdl,
resume_token);
if (resume_nvl == NULL) {
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(resume_nvl, "object", &obj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &off) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid",
&resume_guid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(saved_nvl, "toguid",
&saved_guid)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset's resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (resume_guid != saved_guid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token does not match dataset"));
ret = zfs_error(hdl, EZFS_BADBACKUP, errbuf);
goto out;
}
}
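	/*
	 * Rewrite the saved token to restart from the chosen object and
	 * offset (zeros when starting from the beginning) and to name
	 * this dataset as the send source.
	 */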
(void) nvlist_remove_all(saved_nvl, "object");
fnvlist_add_uint64(saved_nvl, "object", obj);
(void) nvlist_remove_all(saved_nvl, "offset");
fnvlist_add_uint64(saved_nvl, "offset", off);
(void) nvlist_remove_all(saved_nvl, "bytes");
fnvlist_add_uint64(saved_nvl, "bytes", bytes);
(void) nvlist_remove_all(saved_nvl, "toname");
fnvlist_add_string(saved_nvl, "toname", zhp->zfs_name);
ret = zfs_send_resume_impl(hdl, flags, outfd, saved_nvl);
out:
fnvlist_free(saved_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
/*
* This function informs the target system that the recursive send is complete.
* The record is also expected in the case of a send -p.
*/
static int
send_conclusion_record(int fd, zio_cksum_t *zc)
{
dmu_replay_record_t drr = { 0 };
drr.drr_type = DRR_END;
if (zc != NULL)
drr.drr_u.drr_end.drr_checksum = *zc;
if (write(fd, &drr, sizeof (drr)) == -1) {
return (errno);
}
return (0);
}
/*
* This function is responsible for sending the records that contain the
* necessary information for the target system's libzfs to be able to set the
* properties of the filesystem being received, or to be able to prepare for
* a recursive receive.
*
* The "zhp" argument is the handle of the snapshot we are sending
* (the "tosnap"). The "from" argument is the short snapshot name (the part
* after the @) of the incremental source.
*/
static int
send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
boolean_t gather_props, boolean_t recursive, boolean_t verbose,
boolean_t dryrun, boolean_t raw, boolean_t replicate, boolean_t skipmissing,
boolean_t backup, boolean_t holds, boolean_t props, boolean_t doall,
nvlist_t **fssp, avl_tree_t **fsavlp)
{
int err = 0;
char *packbuf = NULL;
size_t buflen = 0;
zio_cksum_t zc = { {0} };
int featureflags = 0;
/* name of filesystem/volume that contains snapshot we are sending */
char tofs[ZFS_MAX_DATASET_NAME_LEN];
/* short name of snap we are sending */
const char *tosnap = "";
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && zfs_prop_get_int(zhp,
ZFS_PROP_VERSION) >= ZPL_VERSION_SA) {
featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
if (holds)
featureflags |= DMU_BACKUP_FEATURE_HOLDS;
(void) strlcpy(tofs, zhp->zfs_name, ZFS_MAX_DATASET_NAME_LEN);
char *at = strchr(tofs, '@');
if (at != NULL) {
*at = '\0';
tosnap = at + 1;
}
if (gather_props) {
nvlist_t *hdrnv = fnvlist_alloc();
nvlist_t *fss = NULL;
if (from != NULL)
fnvlist_add_string(hdrnv, "fromsnap", from);
fnvlist_add_string(hdrnv, "tosnap", tosnap);
if (!recursive)
fnvlist_add_boolean(hdrnv, "not_recursive");
if (raw) {
fnvlist_add_boolean(hdrnv, "raw");
}
if (gather_nvlist(zhp->zfs_hdl, tofs,
from, tosnap, recursive, raw, doall, replicate, skipmissing,
verbose, backup, holds, props, &fss, fsavlp) != 0) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
/*
* Do not allow the size of the properties list to exceed
		 * the limit.
*/
if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
"the size of the list of snapshots and properties "
"is too large to be received successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name);
return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
errbuf));
}
fnvlist_add_nvlist(hdrnv, "fss", fss);
VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
0));
if (fssp != NULL) {
*fssp = fss;
} else {
fnvlist_free(fss);
}
fnvlist_free(hdrnv);
}
if (!dryrun) {
dmu_replay_record_t drr = { 0 };
/* write first begin record */
drr.drr_type = DRR_BEGIN;
drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
drr_versioninfo, DMU_COMPOUNDSTREAM);
DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
drr_versioninfo, featureflags);
if (snprintf(drr.drr_u.drr_begin.drr_toname,
sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", tofs,
tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
drr.drr_payloadlen = buflen;
err = dump_record(&drr, packbuf, buflen, &zc, fd);
free(packbuf);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
err = send_conclusion_record(fd, &zc);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
}
return (0);
}
/*
* Generate a send stream. The "zhp" argument is the filesystem/volume
* that contains the snapshot to send. The "fromsnap" argument is the
* short name (the part after the '@') of the snapshot that is the
* incremental source to send from (if non-NULL). The "tosnap" argument
* is the short name of the snapshot to send.
*
* The content of the send stream is the snapshot identified by
* 'tosnap'. Incremental streams are requested in two ways:
* - from the snapshot identified by "fromsnap" (if non-null) or
* - from the origin of the dataset identified by zhp, which must
* be a clone. In this case, "fromsnap" is null and "fromorigin"
* is TRUE.
*
* The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
* uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
* if "replicate" is set. If "doall" is set, dump all the intermediate
* snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
* case too. If "props" is set, send properties.
*
* Pre-wrapped (cf. lzc_send_wrapper()).
*/
static int
zfs_send_cb_impl(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
char errbuf[ERRBUFLEN];
send_dump_data_t sdd = { 0 };
int err = 0;
nvlist_t *fss = NULL;
avl_tree_t *fsavl = NULL;
static uint64_t holdseq;
int spa_version;
FILE *fout;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot send '%s'"), zhp->zfs_name);
if (fromsnap && fromsnap[0] == '\0') {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"zero-length incremental source"));
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
}
if (fromsnap) {
char full_fromsnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_fromsnap_name, sizeof (full_fromsnap_name),
"%s@%s", zhp->zfs_name, fromsnap) >=
sizeof (full_fromsnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *fromsnapn = zfs_open(zhp->zfs_hdl,
full_fromsnap_name, ZFS_TYPE_SNAPSHOT);
if (fromsnapn == NULL) {
err = -1;
goto err_out;
}
zfs_close(fromsnapn);
}
if (flags->replicate || flags->doall || flags->props ||
flags->holds || flags->backup) {
char full_tosnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_tosnap_name, sizeof (full_tosnap_name),
"%s@%s", zhp->zfs_name, tosnap) >=
sizeof (full_tosnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *tosnap = zfs_open(zhp->zfs_hdl,
full_tosnap_name, ZFS_TYPE_SNAPSHOT);
if (tosnap == NULL) {
err = -1;
goto err_out;
}
err = send_prelim_records(tosnap, fromsnap, outfd,
flags->replicate || flags->props || flags->holds,
flags->replicate, flags->verbosity > 0, flags->dryrun,
flags->raw, flags->replicate, flags->skipmissing,
flags->backup, flags->holds, flags->props, flags->doall,
&fss, &fsavl);
zfs_close(tosnap);
if (err != 0)
goto err_out;
}
/* dump each stream */
sdd.fromsnap = fromsnap;
sdd.tosnap = tosnap;
sdd.outfd = outfd;
sdd.replicate = flags->replicate;
sdd.doall = flags->doall;
sdd.fromorigin = flags->fromorigin;
sdd.fss = fss;
sdd.fsavl = fsavl;
sdd.verbosity = flags->verbosity;
sdd.parsable = flags->parsable;
sdd.progress = flags->progress;
sdd.progressastitle = flags->progressastitle;
sdd.dryrun = flags->dryrun;
sdd.large_block = flags->largeblock;
sdd.embed_data = flags->embed_data;
sdd.compress = flags->compress;
sdd.raw = flags->raw;
sdd.holds = flags->holds;
sdd.filter_cb = filter_func;
sdd.filter_cb_arg = cb_arg;
if (debugnvp)
sdd.debugnv = *debugnvp;
if (sdd.verbosity != 0 && sdd.dryrun)
sdd.std_out = B_TRUE;
fout = sdd.std_out ? stdout : stderr;
/*
* Some flags require that we place user holds on the datasets that are
* being sent so they don't get destroyed during the send. We can skip
* this step if the pool is imported read-only since the datasets cannot
* be destroyed.
*/
if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
ZPOOL_PROP_READONLY, NULL) &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS &&
(flags->doall || flags->replicate)) {
++holdseq;
(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
sdd.cleanup_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (sdd.cleanup_fd < 0) {
err = errno;
goto stderr_out;
}
sdd.snapholds = fnvlist_alloc();
} else {
sdd.cleanup_fd = -1;
sdd.snapholds = NULL;
}
if (flags->verbosity != 0 || sdd.snapholds != NULL) {
/*
* Do a verbose no-op dry run to get all the verbose output
		 * or to gather snapshot holds before generating any data,
* then do a non-verbose real run to generate the streams.
*/
sdd.dryrun = B_TRUE;
err = dump_filesystems(zhp, &sdd);
if (err != 0)
goto stderr_out;
if (flags->verbosity != 0) {
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n",
(longlong_t)sdd.size);
} else {
char buf[16];
zfs_nicebytes(sdd.size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
}
		/* Treat "no snapshots found" as an error. */
if (!sdd.seento) {
err = ENOENT;
goto err_out;
}
/* Skip the second run if dryrun was requested. */
if (flags->dryrun)
goto err_out;
if (sdd.snapholds != NULL) {
err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds);
if (err != 0)
goto stderr_out;
fnvlist_free(sdd.snapholds);
sdd.snapholds = NULL;
}
sdd.dryrun = B_FALSE;
sdd.verbosity = 0;
}
err = dump_filesystems(zhp, &sdd);
fsavl_destroy(fsavl);
fnvlist_free(fss);
	/* Treat "no snapshots found" as an error. */
if (err == 0 && !sdd.seento)
err = ENOENT;
if (sdd.cleanup_fd != -1) {
VERIFY(0 == close(sdd.cleanup_fd));
sdd.cleanup_fd = -1;
}
if (!flags->dryrun && (flags->replicate || flags->doall ||
flags->props || flags->backup || flags->holds)) {
/*
		 * Write the final end record.  NB: we want to do this even
		 * if there was some error, because the stream might not
		 * have failed entirely.
*/
int err2 = send_conclusion_record(outfd, NULL);
if (err2 != 0)
return (zfs_standard_error(zhp->zfs_hdl, err2, errbuf));
}
return (err || sdd.err);
stderr_out:
err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
err_out:
fsavl_destroy(fsavl);
fnvlist_free(fss);
fnvlist_free(sdd.snapholds);
if (sdd.cleanup_fd != -1)
VERIFY(0 == close(sdd.cleanup_fd));
return (err);
}
struct zfs_send {
zfs_handle_t *zhp;
const char *fromsnap;
const char *tosnap;
sendflags_t *flags;
snapfilter_cb_t *filter_func;
void *cb_arg;
nvlist_t **debugnvp;
};
static int
zfs_send_cb(int outfd, void *arg)
{
struct zfs_send *zs = arg;
return (zfs_send_cb_impl(zs->zhp, zs->fromsnap, zs->tosnap, zs->flags,
outfd, zs->filter_func, zs->cb_arg, zs->debugnvp));
}
int
zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
struct zfs_send arg = {
.zhp = zhp,
.fromsnap = fromsnap,
.tosnap = tosnap,
.flags = flags,
.filter_func = filter_func,
.cb_arg = cb_arg,
.debugnvp = debugnvp,
};
return (lzc_send_wrapper(zfs_send_cb, outfd, &arg));
}
static zfs_handle_t *
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
{
char dirname[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(dirname, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(dirname, '@');
if (c != NULL)
*c = '\0';
return (zfs_open(hdl, dirname, ZFS_TYPE_DATASET));
}
/*
* Returns B_TRUE if earlier is an earlier snapshot in later's timeline; either
* an earlier snapshot in the same filesystem, or a snapshot before later's
 * origin, or its origin's origin, etc.
*/
static boolean_t
snapshot_is_before(zfs_handle_t *earlier, zfs_handle_t *later)
{
boolean_t ret;
uint64_t later_txg =
(later->zfs_type == ZFS_TYPE_FILESYSTEM ||
later->zfs_type == ZFS_TYPE_VOLUME ?
UINT64_MAX : zfs_prop_get_int(later, ZFS_PROP_CREATETXG));
uint64_t earlier_txg = zfs_prop_get_int(earlier, ZFS_PROP_CREATETXG);
if (earlier_txg >= later_txg)
return (B_FALSE);
zfs_handle_t *earlier_dir = name_to_dir_handle(earlier->zfs_hdl,
earlier->zfs_name);
zfs_handle_t *later_dir = name_to_dir_handle(later->zfs_hdl,
later->zfs_name);
if (strcmp(earlier_dir->zfs_name, later_dir->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_TRUE);
}
char clonename[ZFS_MAX_DATASET_NAME_LEN];
if (zfs_prop_get(later_dir, ZFS_PROP_ORIGIN, clonename,
ZFS_MAX_DATASET_NAME_LEN, NULL, NULL, 0, B_TRUE) != 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_FALSE);
}
zfs_handle_t *origin = zfs_open(earlier->zfs_hdl, clonename,
ZFS_TYPE_DATASET);
uint64_t origin_txg = zfs_prop_get_int(origin, ZFS_PROP_CREATETXG);
/*
* If "earlier" is exactly the origin, then
* snapshot_is_before(earlier, origin) will return false (because
* they're the same).
*/
if (origin_txg == earlier_txg &&
strcmp(origin->zfs_name, earlier->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
zfs_close(origin);
return (B_TRUE);
}
zfs_close(earlier_dir);
zfs_close(later_dir);
ret = snapshot_is_before(earlier, origin);
zfs_close(origin);
return (ret);
}
/*
* The "zhp" argument is the handle of the dataset to send (typically a
* snapshot). The "from" argument is the full name of the snapshot or
* bookmark that is the incremental source.
*
* Pre-wrapped (cf. lzc_send_wrapper()).
*/
static int
zfs_send_one_cb_impl(zfs_handle_t *zhp, const char *from, int fd,
sendflags_t *flags, const char *redactbook)
{
int err;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *name = zhp->zfs_name;
pthread_t ptid;
progress_arg_t pa = { 0 };
uint64_t size = 0;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), name);
if (from != NULL && strchr(from, '@')) {
zfs_handle_t *from_zhp = zfs_open(hdl, from,
ZFS_TYPE_DATASET);
if (from_zhp == NULL)
return (-1);
if (!snapshot_is_before(from_zhp, zhp)) {
zfs_close(from_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
zfs_close(from_zhp);
}
if (redactbook != NULL) {
char bookname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *redact_snaps;
zfs_handle_t *book_zhp;
char *at, *pound;
int dsnamelen;
pound = strchr(redactbook, '#');
if (pound != NULL)
redactbook = pound + 1;
at = strchr(name, '@');
if (at == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot do a redacted send to a filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
dsnamelen = at - name;
if (snprintf(bookname, sizeof (bookname), "%.*s#%s",
dsnamelen, name, redactbook)
>= sizeof (bookname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid bookmark name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
book_zhp = zfs_open(hdl, bookname, ZFS_TYPE_BOOKMARK);
if (book_zhp == NULL)
return (-1);
if (nvlist_lookup_nvlist(book_zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
&redact_snaps) != 0 || redact_snaps == NULL) {
zfs_close(book_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a redaction bookmark"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
zfs_close(book_zhp);
}
/*
* Send fs properties
*/
if (flags->props || flags->holds || flags->backup) {
/*
* Note: the header generated by send_prelim_records()
* assumes that the incremental source is in the same
* filesystem/volume as the target (which is a requirement
* when doing "zfs send -R"). But that isn't always the
* case here (e.g. send from snap in origin, or send from
* bookmark). We pass from=NULL, which will omit this
* information from the prelim records; it isn't used
* when receiving this type of stream.
*/
err = send_prelim_records(zhp, NULL, fd, B_TRUE, B_FALSE,
flags->verbosity > 0, flags->dryrun, flags->raw,
flags->replicate, B_FALSE, flags->backup, flags->holds,
flags->props, flags->doall, NULL, NULL);
if (err != 0)
return (err);
}
/*
* Perform size estimate if verbose was specified.
*/
if (flags->verbosity != 0 || flags->progressastitle) {
err = estimate_size(zhp, from, fd, flags, 0, 0, 0, redactbook,
errbuf, &size);
if (err != 0)
return (err);
}
if (flags->dryrun)
return (0);
/*
	 * Spawn a thread to poll ZFS_IOC_SEND_PROGRESS; it reports
	 * periodically when progress reporting was requested, and on demand
	 * when SIGUSR1/SIGINFO arrives.
*/
- if (flags->progress || flags->progressastitle) {
+ sigset_t oldmask;
+ {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
pa.pa_size = size;
pa.pa_astitle = flags->progressastitle;
pa.pa_progress = flags->progress;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
+ SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
err = lzc_send_redacted(name, from, fd,
lzc_flags_from_sendflags(flags), redactbook);
- if ((flags->progress || flags->progressastitle) &&
- send_progress_thread_exit(hdl, ptid))
+ if (send_progress_thread_exit(hdl, ptid, &oldmask))
return (-1);
if (err == 0 && (flags->props || flags->holds || flags->backup)) {
/* Write the final end record. */
err = send_conclusion_record(fd, NULL);
if (err != 0)
return (zfs_standard_error(hdl, err, errbuf));
}
if (err != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
case ESRCH:
if (lzc_exists(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
from);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"target is busy; if a filesystem, "
"it must not be mounted"));
return (zfs_error(hdl, EZFS_BUSY, errbuf));
case EDQUOT:
case EFAULT:
case EFBIG:
case EINVAL:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (err != 0);
}
struct zfs_send_one {
zfs_handle_t *zhp;
const char *from;
sendflags_t *flags;
const char *redactbook;
};
static int
zfs_send_one_cb(int fd, void *arg)
{
struct zfs_send_one *zso = arg;
return (zfs_send_one_cb_impl(zso->zhp, zso->from, fd, zso->flags,
zso->redactbook));
}
int
zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
const char *redactbook)
{
struct zfs_send_one zso = {
.zhp = zhp,
.from = from,
.flags = flags,
.redactbook = redactbook,
};
return (lzc_send_wrapper(zfs_send_one_cb, fd, &zso));
}
/*
* Routines specific to "zfs recv"
*/
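/*
 * Read exactly ilen bytes from fd into buf, retrying short reads, and
 * fold the data into the running fletcher-4 checksum (native or
 * byteswapped to match the stream) when zc is non-NULL.
 */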
static int
recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
boolean_t byteswap, zio_cksum_t *zc)
{
char *cp = buf;
int rv;
int len = ilen;
do {
rv = read(fd, cp, len);
cp += rv;
len -= rv;
} while (rv > 0);
if (rv < 0 || len != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to read from stream"));
return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
"cannot receive")));
}
if (zc) {
if (byteswap)
fletcher_4_incremental_byteswap(buf, ilen, zc);
else
fletcher_4_incremental_native(buf, ilen, zc);
}
return (0);
}
static int
recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
boolean_t byteswap, zio_cksum_t *zc)
{
char *buf;
int err;
buf = zfs_alloc(hdl, len);
if (len > hdl->libzfs_max_nvlist) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
free(buf);
return (ENOMEM);
}
err = recv_read(hdl, fd, buf, len, byteswap, zc);
if (err != 0) {
free(buf);
return (err);
}
err = nvlist_unpack(buf, len, nvp, 0);
free(buf);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (malformed nvlist)"));
return (EINVAL);
}
return (0);
}
/*
* Returns the grand origin (origin of origin of origin...) of a given handle.
* If this dataset is not a clone, it simply returns a copy of the original
* handle.
*/
static zfs_handle_t *
recv_open_grand_origin(zfs_handle_t *zhp)
{
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
zfs_handle_t *ozhp = zfs_handle_dup(zhp);
while (ozhp != NULL) {
if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin,
sizeof (origin), &src, NULL, 0, B_FALSE) != 0)
break;
(void) zfs_close(ozhp);
ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM);
}
return (ozhp);
}
static int
recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname)
{
int err;
zfs_handle_t *ozhp = NULL;
/*
* Attempt to rename the dataset. If it fails with EACCES we have
* attempted to rename the dataset outside of its encryption root.
* Force the dataset to become an encryption root and try again.
*/
err = lzc_rename(name, newname);
if (err == EACCES) {
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = ENOENT;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = lzc_rename(name, newname);
}
out:
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
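/*
 * Rename the dataset "name" to "tryname" when one is supplied, falling
 * back to a generated "recv-<pid>-<seq>" name under the same prefix,
 * keeping the mount changelist consistent across the rename.
 */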
static int
recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
int baselen, char *newname, recvflags_t *flags)
{
static int seq;
int err;
prop_changelist_t *clp = NULL;
zfs_handle_t *zhp = NULL;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
if (clp == NULL) {
err = -1;
goto out;
}
err = changelist_prefix(clp);
if (err)
goto out;
if (tryname) {
(void) strlcpy(newname, tryname, ZFS_MAX_DATASET_NAME_LEN);
if (flags->verbose) {
(void) printf("attempting rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, tryname);
} else {
err = ENOENT;
}
if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) {
seq++;
(void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN,
"%.*srecv-%u-%u", baselen, name, getpid(), seq);
if (flags->verbose) {
(void) printf("failed - trying rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, newname);
if (err && flags->verbose) {
(void) printf("failed (%u) - "
"will try again on next pass\n", errno);
}
err = EAGAIN;
} else if (flags->verbose) {
if (err == 0)
(void) printf("success\n");
else
(void) printf("failed (%u)\n", errno);
}
(void) changelist_postfix(clp);
out:
if (clp != NULL)
changelist_free(clp);
if (zhp != NULL)
zfs_close(zhp);
return (err);
}
static int
recv_promote(libzfs_handle_t *hdl, const char *fsname,
const char *origin_fsname, recvflags_t *flags)
{
int err;
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL, *ozhp = NULL;
if (flags->verbose)
(void) printf("promoting %s\n", fsname);
(void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
/*
* Attempt to promote the dataset. If it fails with EACCES the
* promotion would cause this dataset to leave its encryption root.
* Force the origin to become an encryption root and try again.
*/
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (err == EACCES) {
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = -1;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
}
out:
if (zhp != NULL)
zfs_close(zhp);
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
char *newname, recvflags_t *flags)
{
int err = 0;
prop_changelist_t *clp;
zfs_handle_t *zhp;
boolean_t defer = B_FALSE;
int spa_version;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
zfs_type_t type = zfs_get_type(zhp);
if (type == ZFS_TYPE_SNAPSHOT &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS)
defer = B_TRUE;
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL)
return (-1);
err = changelist_prefix(clp);
if (err)
return (err);
if (flags->verbose)
(void) printf("attempting destroy %s\n", name);
if (type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, name);
err = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
err = lzc_destroy(name);
}
if (err == 0) {
if (flags->verbose)
(void) printf("success\n");
changelist_remove(clp, name);
}
(void) changelist_postfix(clp);
changelist_free(clp);
/*
* Deferred destroy might destroy the snapshot or only mark it to be
* destroyed later, and it returns success in either case.
*/
if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
ZFS_TYPE_SNAPSHOT))) {
err = recv_rename(hdl, name, NULL, baselen, newname, flags);
}
return (err);
}
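/*
* Search context for guid_to_name_cb(): the guid to find, where to store
* the resulting name, an optional child name to skip, and the redaction
* snapshot guids to match against (see guid_to_name_redact_snaps()).
*/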
typedef struct guid_to_name_data {
uint64_t guid;
boolean_t bookmark_ok;
char *name;
char *skip;
uint64_t *redact_snap_guids;
uint64_t num_redact_snaps;
} guid_to_name_data_t;
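/*
* Check whether a bookmark's redaction snapshot guids match exactly the
* guids in gtnd->redact_snap_guids: the counts must be equal and every
* bookmark guid must appear somewhere in the requested set.
*/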
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
uint64_t *bmark_snaps;
uint_t bmark_num_snaps;
nvlist_t *nvl;
if (zhp->zfs_type != ZFS_TYPE_BOOKMARK)
return (B_FALSE);
nvl = fnvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
bmark_snaps = fnvlist_lookup_uint64_array(nvl, ZPROP_VALUE,
&bmark_num_snaps);
if (bmark_num_snaps != gtnd->num_redact_snaps)
return (B_FALSE);
int i = 0;
for (; i < bmark_num_snaps; i++) {
int j = 0;
for (; j < bmark_num_snaps; j++) {
if (bmark_snaps[i] == gtnd->redact_snap_guids[j])
break;
}
if (j == bmark_num_snaps)
break;
}
return (i == bmark_num_snaps);
}
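/*
* Iterator callback for the guid search. Returns EEXIST (repurposed here
* to mean "found") once a dataset or bookmark with the matching guid is
* located, after copying its name into gtnd->name; otherwise it recurses
* into children and, when bookmark_ok is set, bookmarks. A child named
* gtnd->skip was already searched on a previous pass and is skipped.
*/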
static int
guid_to_name_cb(zfs_handle_t *zhp, void *arg)
{
guid_to_name_data_t *gtnd = arg;
const char *slash;
int err;
if (gtnd->skip != NULL &&
(slash = strrchr(zhp->zfs_name, '/')) != NULL &&
strcmp(slash + 1, gtnd->skip) == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid &&
(gtnd->num_redact_snaps == -1 || redact_snaps_match(zhp, gtnd))) {
(void) strcpy(gtnd->name, zhp->zfs_name);
zfs_close(zhp);
return (EEXIST);
}
err = zfs_iter_children_v2(zhp, 0, guid_to_name_cb, gtnd);
if (err != EEXIST && gtnd->bookmark_ok)
err = zfs_iter_bookmarks_v2(zhp, 0, guid_to_name_cb, gtnd);
zfs_close(zhp);
return (err);
}
/*
* Attempt to find the local dataset associated with this guid. In the case of
* multiple matches, we attempt to find the "best" match by searching
* progressively larger portions of the hierarchy. This allows one to send a
* tree of datasets individually and guarantee that we will find the source
* guid within that hierarchy, even if there are multiple matches elsewhere.
*
* If num_redact_snaps is not -1, we attempt to find a redaction bookmark with
* the specified number of redaction snapshots. If num_redact_snaps isn't 0 or
* -1, then redact_snap_guids will be an array of the guids of the snapshots the
* redaction bookmark was created with. If num_redact_snaps is -1, then we will
* attempt to find a snapshot or bookmark (if bookmark_ok is passed) with the
* given guid. Note that a redaction bookmark can be returned if
* num_redact_snaps == -1.
*/
static int
guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name)
{
char pname[ZFS_MAX_DATASET_NAME_LEN];
guid_to_name_data_t gtnd;
gtnd.guid = guid;
gtnd.bookmark_ok = bookmark_ok;
gtnd.name = name;
gtnd.skip = NULL;
gtnd.redact_snap_guids = redact_snap_guids;
gtnd.num_redact_snaps = num_redact_snaps;
/*
* Search progressively larger portions of the hierarchy, starting
* with the filesystem specified by 'parent'. This will
* select the "most local" version of the origin snapshot in the case
* that there are multiple matching snapshots in the system.
*/
(void) strlcpy(pname, parent, sizeof (pname));
char *cp = strrchr(pname, '@');
if (cp == NULL)
cp = strchr(pname, '\0');
for (; cp != NULL; cp = strrchr(pname, '/')) {
/* Chop off the last component and open the parent */
*cp = '\0';
zfs_handle_t *zhp = make_dataset_handle(hdl, pname);
if (zhp == NULL)
continue;
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
if (err != EEXIST)
err = zfs_iter_children_v2(zhp, 0, guid_to_name_cb,
&gtnd);
if (err != EEXIST && bookmark_ok)
err = zfs_iter_bookmarks_v2(zhp, 0, guid_to_name_cb,
&gtnd);
zfs_close(zhp);
if (err == EEXIST)
return (0);
/*
* Remember the last portion of the dataset so we skip it next
* time through (as we've already searched that portion of the
* hierarchy).
*/
gtnd.skip = strrchr(pname, '/') + 1;
}
return (ENOENT);
}
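/*
* Convenience wrapper around guid_to_name_redact_snaps() for the common
* case: look up a snapshot (or bookmark, when bookmark_ok) by guid with
* no redaction-snapshot matching. A minimal usage sketch, mirroring the
* clone-origin lookup done in zfs_receive_one() below (identifiers are
* illustrative):
*
*	char origin[ZFS_MAX_DATASET_NAME_LEN];
*	if (guid_to_name(hdl, destsnap, drrb->drr_fromguid,
*	    B_FALSE, origin) == 0)
*		(void) printf("found clone origin %s\n", origin);
*/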
static int
guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
boolean_t bookmark_ok, char *name)
{
return (guid_to_name_redact_snaps(hdl, parent, guid, bookmark_ok, NULL,
-1, name));
}
/*
* Compare snapshot creation times: return -1 if guid1 was created before
* guid2, +1 if it was created after, and 0 if they match. A zero guid2
* compares equal, a zero guid1 compares later, and -1 is also returned
* if either snapshot cannot be opened.
*/
static int
created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
uint64_t guid1, uint64_t guid2)
{
nvlist_t *nvfs;
const char *fsname = NULL, *snapname = NULL;
char buf[ZFS_MAX_DATASET_NAME_LEN];
int rv;
zfs_handle_t *guid1hdl, *guid2hdl;
uint64_t create1, create2;
if (guid2 == 0)
return (0);
if (guid1 == 0)
return (1);
nvfs = fsavl_find(avl, guid1, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid1hdl == NULL)
return (-1);
nvfs = fsavl_find(avl, guid2, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid2hdl == NULL) {
zfs_close(guid1hdl);
return (-1);
}
create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
if (create1 < create2)
rv = -1;
else if (create1 > create2)
rv = +1;
else
rv = 0;
zfs_close(guid1hdl);
zfs_close(guid2hdl);
return (rv);
}
/*
* This function reestablishes the hierarchy of encryption roots after a
* recursive incremental receive has completed. This must be done after the
* second call to recv_incremental_replication() has renamed and promoted all
* sent datasets to their final locations in the dataset hierarchy.
*/
static int
recv_fix_encryption_hierarchy(libzfs_handle_t *hdl, const char *top_zfs,
nvlist_t *stream_nv)
{
int err;
nvpair_t *fselem = NULL;
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) {
zfs_handle_t *zhp = NULL;
uint64_t crypt;
nvlist_t *snaps, *props, *stream_nvfs = NULL;
nvpair_t *snapel = NULL;
boolean_t is_encroot, is_clone, stream_encroot;
char *cp;
const char *stream_keylocation = NULL;
char keylocation[MAXNAMELEN];
char fsname[ZFS_MAX_DATASET_NAME_LEN];
keylocation[0] = '\0';
stream_nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(stream_nvfs, "snaps");
props = fnvlist_lookup_nvlist(stream_nvfs, "props");
stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
/* find a snapshot from the stream that exists locally */
err = ENOENT;
while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) {
uint64_t guid;
guid = fnvpair_value_uint64(snapel);
err = guid_to_name(hdl, top_zfs, guid, B_FALSE,
fsname);
if (err == 0)
break;
}
if (err != 0)
continue;
cp = strchr(fsname, '@');
if (cp != NULL)
*cp = '\0';
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = ENOENT;
goto error;
}
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
(void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
/* we don't need to do anything for unencrypted datasets */
if (crypt == ZIO_CRYPT_OFF) {
zfs_close(zhp);
continue;
}
/*
* If the dataset is flagged as an encryption root, was not
* received as a clone and is not currently an encryption root,
* force it to become one. Fix up the keylocation if necessary.
*/
if (stream_encroot) {
if (!is_clone && !is_encroot) {
err = lzc_change_key(fsname,
DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
stream_keylocation = fnvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
/*
* Refresh the properties in case the call to
* lzc_change_key() changed the value.
*/
zfs_refresh_properties(zhp);
err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
keylocation, sizeof (keylocation), NULL, NULL,
0, B_TRUE);
if (err != 0) {
zfs_close(zhp);
goto error;
}
if (strcmp(keylocation, stream_keylocation) != 0) {
err = zfs_prop_set(zhp,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
stream_keylocation);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
}
/*
* If the dataset is not flagged as an encryption root and is
* currently an encryption root, force it to inherit from its
* parent. The root of a raw send should never be
* force-inherited.
*/
if (!stream_encroot && is_encroot &&
strcmp(top_zfs, fsname) != 0) {
err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT,
NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
zfs_close(zhp);
}
return (0);
error:
return (err);
}
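/*
* Reconcile the local dataset hierarchy with the incoming replication
* stream: promote clones whose origin changed on the sender, destroy
* snapshots and filesystems deleted on the sender (only with -F), and
* rename datasets whose name or parent changed. This may require
* multiple passes (temporary "recv-" names are fixed up later); the
* return value is nonzero if another pass is still needed or an error
* occurred.
*/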
static int
recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
nvlist_t *renamed)
{
nvlist_t *local_nv, *deleted = NULL;
avl_tree_t *local_avl;
nvpair_t *fselem, *nextfselem;
const char *fromsnap;
char newname[ZFS_MAX_DATASET_NAME_LEN];
char guidname[32];
int error;
boolean_t needagain, progress, recursive;
const char *s1, *s2;
fromsnap = fnvlist_lookup_string(stream_nv, "fromsnap");
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
if (flags->dryrun)
return (0);
again:
needagain = progress = B_FALSE;
deleted = fnvlist_alloc();
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
return (error);
/*
* Process deletes and renames
*/
for (fselem = nvlist_next_nvpair(local_nv, NULL);
fselem; fselem = nextfselem) {
nvlist_t *nvfs, *snaps;
nvlist_t *stream_nvfs = NULL;
nvpair_t *snapelem, *nextsnapelem;
uint64_t fromguid = 0;
uint64_t originguid = 0;
uint64_t stream_originguid = 0;
uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
const char *fsname, *stream_fsname;
nextfselem = nvlist_next_nvpair(local_nv, fselem);
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
fsname = fnvlist_lookup_string(nvfs, "name");
parent_fromsnap_guid = fnvlist_lookup_uint64(nvfs,
"parentfromsnap");
(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
/*
* First find the stream's fs, so we can check for
* a different origin (due to "zfs promote")
*/
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
uint64_t thisguid;
thisguid = fnvpair_value_uint64(snapelem);
stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
if (stream_nvfs != NULL)
break;
}
/* check for promote */
(void) nvlist_lookup_uint64(stream_nvfs, "origin",
&stream_originguid);
if (stream_nvfs && originguid != stream_originguid) {
switch (created_before(hdl, local_avl,
stream_originguid, originguid)) {
case 1: {
/* promote it! */
nvlist_t *origin_nvfs;
const char *origin_fsname;
origin_nvfs = fsavl_find(local_avl, originguid,
NULL);
origin_fsname = fnvlist_lookup_string(
origin_nvfs, "name");
error = recv_promote(hdl, fsname, origin_fsname,
flags);
if (error == 0)
progress = B_TRUE;
break;
}
default:
break;
case -1:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
return (-1);
}
/*
* We had/have the wrong origin, therefore our
* list of snapshots is wrong. Need to handle
* them on the next pass.
*/
needagain = B_TRUE;
continue;
}
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nextsnapelem) {
uint64_t thisguid;
const char *stream_snapname;
nvlist_t *found, *props;
nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
thisguid = fnvpair_value_uint64(snapelem);
found = fsavl_find(stream_avl, thisguid,
&stream_snapname);
/* check for delete */
if (found == NULL) {
char name[ZFS_MAX_DATASET_NAME_LEN];
if (!flags->force)
continue;
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
error = recv_destroy(hdl, name,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
(void) snprintf(guidname, sizeof (guidname),
"%llu", (u_longlong_t)thisguid);
nvlist_add_boolean(deleted, guidname);
continue;
}
stream_nvfs = found;
if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
&props) && 0 == nvlist_lookup_nvlist(props,
stream_snapname, &props)) {
zfs_cmd_t zc = {"\0"};
zc.zc_cookie = B_TRUE; /* received */
(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
"%s@%s", fsname, nvpair_name(snapelem));
zcmd_write_src_nvlist(hdl, &zc, props);
(void) zfs_ioctl(hdl,
ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
/* check for different snapname */
if (strcmp(nvpair_name(snapelem),
stream_snapname) != 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
char tryname[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
(void) snprintf(tryname, sizeof (tryname), "%s@%s",
fsname, stream_snapname);
error = recv_rename(hdl, name, tryname,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
if (strcmp(stream_snapname, fromsnap) == 0)
fromguid = thisguid;
}
/* check for delete */
if (stream_nvfs == NULL) {
if (!flags->force)
continue;
error = recv_destroy(hdl, fsname, strlen(tofs)+1,
newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
(void) snprintf(guidname, sizeof (guidname),
"%llu", (u_longlong_t)parent_fromsnap_guid);
nvlist_add_boolean(deleted, guidname);
continue;
}
if (fromguid == 0) {
if (flags->verbose) {
(void) printf("local fs %s does not have "
"fromsnap (%s in stream); must have "
"been deleted locally; ignoring\n",
fsname, fromsnap);
}
continue;
}
stream_fsname = fnvlist_lookup_string(stream_nvfs, "name");
stream_parent_fromsnap_guid = fnvlist_lookup_uint64(
stream_nvfs, "parentfromsnap");
s1 = strrchr(fsname, '/');
s2 = strrchr(stream_fsname, '/');
/*
* Check whether we're about to rename because of a parent guid
* change while the current parent guid was itself deleted. In
* that case the rename would fail and is likely unneeded, so
* skip it and force an early retry to determine the new
* parent_fromsnap_guid.
*/
if (stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
(void) snprintf(guidname, sizeof (guidname),
"%llu", (u_longlong_t)parent_fromsnap_guid);
if (nvlist_exists(deleted, guidname)) {
progress = B_TRUE;
needagain = B_TRUE;
goto doagain;
}
}
/*
* Check for rename. If the exact receive path is specified, it
* does not count as a rename, but we still need to check the
* datasets beneath it.
*/
if ((stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
((flags->isprefix || strcmp(tofs, fsname) != 0) &&
(s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
nvlist_t *parent;
char tryname[ZFS_MAX_DATASET_NAME_LEN];
parent = fsavl_find(local_avl,
stream_parent_fromsnap_guid, NULL);
/*
* NB: parent might not be found if we used the
* tosnap for stream_parent_fromsnap_guid,
* because the parent is a newly-created fs;
* we'll be able to rename it after we recv the
* new fs.
*/
if (parent != NULL) {
const char *pname;
pname = fnvlist_lookup_string(parent, "name");
(void) snprintf(tryname, sizeof (tryname),
"%s%s", pname, strrchr(stream_fsname, '/'));
} else {
tryname[0] = '\0';
if (flags->verbose) {
(void) printf("local fs %s new parent "
"not found\n", fsname);
}
}
newname[0] = '\0';
error = recv_rename(hdl, fsname, tryname,
strlen(tofs)+1, newname, flags);
if (renamed != NULL && newname[0] != '\0') {
fnvlist_add_boolean(renamed, newname);
}
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
}
doagain:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
fnvlist_free(deleted);
if (needagain && progress) {
/* do another pass to fix up temporary names */
if (flags->verbose)
(void) printf("another pass:\n");
goto again;
}
return (needagain || error != 0);
}
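/*
* Receive a compound (DMU_COMPOUNDSTREAM) stream: read the header
* nvlist, verify the DRR_END checksum, run
* recv_incremental_replication() to line up existing datasets, receive
* each contained stream via zfs_receive_impl(), then retry the renames
* and, for raw recursive streams, fix up the encryption hierarchy.
*/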
static int
zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
char **top_zfs, nvlist_t *cmdprops)
{
nvlist_t *stream_nv = NULL;
avl_tree_t *stream_avl = NULL;
const char *fromsnap = NULL;
const char *sendsnap = NULL;
char *cp;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
char sendfs[ZFS_MAX_DATASET_NAME_LEN];
char errbuf[ERRBUFLEN];
dmu_replay_record_t drre;
int error;
boolean_t anyerr = B_FALSE;
boolean_t softerr = B_FALSE;
boolean_t recursive, raw;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
assert(drr->drr_type == DRR_BEGIN);
assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
/*
* Read in the nvlist from the stream.
*/
if (drr->drr_payloadlen != 0) {
error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
&stream_nv, flags->byteswap, zc);
if (error) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
}
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0);
if (recursive && strchr(destname, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot stream"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
/*
* Read in the end record and verify checksum.
*/
if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
flags->byteswap, NULL)))
goto out;
if (flags->byteswap) {
drre.drr_type = BSWAP_32(drre.drr_type);
drre.drr_u.drr_end.drr_checksum.zc_word[0] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
drre.drr_u.drr_end.drr_checksum.zc_word[1] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
drre.drr_u.drr_end.drr_checksum.zc_word[2] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
drre.drr_u.drr_end.drr_checksum.zc_word[3] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
}
if (drre.drr_type != DRR_END) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incorrect header checksum"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
if (drr->drr_payloadlen != 0) {
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"couldn't allocate avl tree"));
error = zfs_error(hdl, EZFS_NOMEM, errbuf);
goto out;
}
if (fromsnap != NULL && recursive) {
nvlist_t *renamed = NULL;
nvpair_t *pair = NULL;
(void) strlcpy(tofs, destname, sizeof (tofs));
if (flags->isprefix) {
struct drr_begin *drrb = &drr->drr_u.drr_begin;
int i;
if (flags->istail) {
cp = strrchr(drrb->drr_toname, '/');
if (cp == NULL) {
(void) strlcat(tofs, "/",
sizeof (tofs));
i = 0;
} else {
i = (cp - drrb->drr_toname);
}
} else {
i = strcspn(drrb->drr_toname, "/@");
}
/* zfs_receive_one() will create_parents() */
(void) strlcat(tofs, &drrb->drr_toname[i],
sizeof (tofs));
*strchr(tofs, '@') = '\0';
}
if (!flags->dryrun && !flags->nomount) {
renamed = fnvlist_alloc();
}
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, renamed);
/* Unmount renamed filesystems before receiving. */
while ((pair = nvlist_next_nvpair(renamed,
pair)) != NULL) {
zfs_handle_t *zhp;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, nvpair_name(pair),
ZFS_TYPE_FILESYSTEM);
if (zhp != NULL) {
clp = changelist_gather(zhp,
ZFS_PROP_MOUNTPOINT, 0,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp != NULL) {
softerr |=
changelist_prefix(clp);
changelist_free(clp);
}
}
}
fnvlist_free(renamed);
}
}
/*
* Get the fs specified by the first path in the stream (the top level
* specified by 'zfs send') and pass it to each invocation of
* zfs_receive_one().
*/
(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
sizeof (sendfs));
if ((cp = strchr(sendfs, '@')) != NULL) {
*cp = '\0';
/*
* Find the "sendsnap", the final snapshot in a replication
* stream. zfs_receive_one() handles certain errors
* differently, depending on if the contained stream is the
* last one or not.
*/
sendsnap = (cp + 1);
}
/* Finally, receive each contained stream */
do {
/*
* We should figure out whether the error is recoverable,
* in which case we can recv_skip() and drive on.
* Note that if we fail due to already having this guid,
* zfs_receive_one() will take care of it (i.e.,
* recv_skip() and return 0).
*/
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
sendfs, stream_nv, stream_avl, top_zfs, sendsnap, cmdprops);
if (error == ENODATA) {
error = 0;
break;
}
anyerr |= error;
} while (error == 0);
if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) {
/*
* Now that we have the fs's they sent us, try the
* renames again.
*/
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, NULL);
}
if (raw && softerr == 0 && *top_zfs != NULL) {
softerr = recv_fix_encryption_hierarchy(hdl, *top_zfs,
stream_nv);
}
out:
fsavl_destroy(stream_avl);
fnvlist_free(stream_nv);
if (softerr)
error = -2;
if (anyerr)
error = -1;
return (error);
}
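/*
* Report how many property errors were elided from the error nvlist (see
* the ZPROP_N_MORE_ERRORS handling in zfs_receive_one() below).
*/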
static void
trunc_prop_errs(int truncated)
{
ASSERT(truncated != 0);
if (truncated == 1)
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"1 more property could not be set\n"));
else
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"%d more properties could not be set\n"), truncated);
}
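/*
* Consume and discard one contained stream: read each record (byte
* swapping headers as needed) and skip its payload until DRR_END is
* seen. Returns -1 if the stream ends before a DRR_END record.
*/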
static int
recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
{
dmu_replay_record_t *drr;
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
uint64_t payload_size;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* XXX would be great to use lseek if possible... */
drr = buf;
while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
byteswap, NULL) == 0) {
if (byteswap)
drr->drr_type = BSWAP_32(drr->drr_type);
switch (drr->drr_type) {
case DRR_BEGIN:
if (drr->drr_payloadlen != 0) {
(void) recv_read(hdl, fd, buf,
drr->drr_payloadlen, B_FALSE, NULL);
}
break;
case DRR_END:
free(buf);
return (0);
case DRR_OBJECT:
if (byteswap) {
drr->drr_u.drr_object.drr_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_bonuslen);
drr->drr_u.drr_object.drr_raw_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_raw_bonuslen);
}
payload_size =
DRR_OBJECT_PAYLOAD_SIZE(&drr->drr_u.drr_object);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE:
if (byteswap) {
drr->drr_u.drr_write.drr_logical_size =
BSWAP_64(
drr->drr_u.drr_write.drr_logical_size);
drr->drr_u.drr_write.drr_compressed_size =
BSWAP_64(
drr->drr_u.drr_write.drr_compressed_size);
}
payload_size =
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
assert(payload_size <= SPA_MAXBLOCKSIZE);
(void) recv_read(hdl, fd, buf,
payload_size, B_FALSE, NULL);
break;
case DRR_SPILL:
if (byteswap) {
drr->drr_u.drr_spill.drr_length =
BSWAP_64(drr->drr_u.drr_spill.drr_length);
drr->drr_u.drr_spill.drr_compressed_size =
BSWAP_64(drr->drr_u.drr_spill.
drr_compressed_size);
}
payload_size =
DRR_SPILL_PAYLOAD_SIZE(&drr->drr_u.drr_spill);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE_EMBEDDED:
if (byteswap) {
drr->drr_u.drr_write_embedded.drr_psize =
BSWAP_32(drr->drr_u.drr_write_embedded.
drr_psize);
}
(void) recv_read(hdl, fd, buf,
P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize,
8), B_FALSE, NULL);
break;
case DRR_OBJECT_RANGE:
case DRR_WRITE_BYREF:
case DRR_FREEOBJECTS:
case DRR_FREE:
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type"));
free(buf);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
}
free(buf);
return (-1);
}
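/*
* Set the error-aux text for a checksum mismatch or truncated stream.
* For resumable receives, also fetch the receive_resume_token from the
* target and tell the user how to generate a resuming stream with
* "zfs send -t".
*/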
static void
recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap,
boolean_t resumable, boolean_t checksum)
{
char target_fs[ZFS_MAX_DATASET_NAME_LEN];
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, (checksum ?
"checksum mismatch" : "incomplete stream")));
if (!resumable)
return;
(void) strlcpy(target_fs, target_snap, sizeof (target_fs));
*strchr(target_fs, '@') = '\0';
zfs_handle_t *zhp = zfs_open(hdl, target_fs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return;
char token_buf[ZFS_MAXPROPLEN];
int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf),
NULL, NULL, 0, B_TRUE);
if (error == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"checksum mismatch or incomplete stream.\n"
"Partially received snapshot is saved.\n"
"A resuming stream can be generated on the sending "
"system by running:\n"
" zfs send -t %s"),
token_buf);
}
zfs_close(zhp);
}
/*
* Prepare a new nvlist of properties that are to override (-o) or be excluded
* (-x) from the received dataset
* recvprops: received properties from the send stream
* cmdprops: raw input properties from command line
* origprops: properties, both locally-set and received, currently set on the
* target dataset if it exists, NULL otherwise.
* oxprops: valid output override (-o) and excluded (-x) properties
*/
static int
zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type,
char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs,
boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops,
nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out,
uint_t *wkeylen_out, const char *errbuf)
{
nvpair_t *nvp;
nvlist_t *oprops, *voprops;
zfs_handle_t *zhp = NULL;
zpool_handle_t *zpool_hdl = NULL;
char *cp;
int ret = 0;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
if (nvlist_empty(cmdprops))
return (0); /* No properties to override or exclude */
*oxprops = fnvlist_alloc();
oprops = fnvlist_alloc();
strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN);
/*
* Get our dataset handle. The target dataset may not exist yet.
*/
if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) {
zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET);
if (zhp == NULL) {
ret = -1;
goto error;
}
}
/* open the zpool handle */
cp = strchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
zpool_hdl = zpool_open(hdl, namebuf);
if (zpool_hdl == NULL) {
ret = -1;
goto error;
}
/* restore namebuf to match fsname for later use */
if (cp != NULL)
*cp = '/';
/*
* first iteration: process excluded (-x) properties now and gather
* added (-o) properties to be later processed by zfs_valid_proplist()
*/
nvp = NULL;
while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
/*
* It turns out, if we don't normalize "aliased" names
* e.g. compress= against the "real" names (e.g. compression)
* here, then setting/excluding them does not work as
* intended.
*
* But since user-defined properties wouldn't have a valid
* mapping here, we do this conditional dance.
*/
const char *newname = name;
if (prop >= ZFS_PROP_TYPE)
newname = zfs_prop_to_name(prop);
/* "origin" is processed separately, don't handle it here */
if (prop == ZFS_PROP_ORIGIN)
continue;
/* raw streams can't override encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set or excluded for raw streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* For a plain (non-raw) replicated send, encryption
* properties matter only for the first stream; ignore
* them on the rest.
*/
if ((zfs_prop_encryption_key_param(prop) || prop ==
ZFS_PROP_ENCRYPTION) && !newfs && recursive && !raw) {
continue;
}
/* incremental streams can only exclude encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && !newfs &&
nvpair_type(nvp) != DATA_TYPE_BOOLEAN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set for incremental streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
switch (nvpair_type(nvp)) {
case DATA_TYPE_BOOLEAN: /* -x property */
/*
* DATA_TYPE_BOOLEAN is the way we're asked to "exclude"
* a property: this is done by forcing an explicit
* inherit on the destination so the effective value is
* not the one we received from the send stream.
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"Warning: %s: property '%s' does not "
"apply to datasets of this type\n"),
fsname, name);
continue;
}
/*
* We do this only if the property is not already
* locally-set, in which case its value will take
* priority over the received anyway.
*/
if (nvlist_exists(origprops, newname)) {
nvlist_t *attrs;
const char *source = NULL;
attrs = fnvlist_lookup_nvlist(origprops,
newname);
if (nvlist_lookup_string(attrs,
ZPROP_SOURCE, &source) == 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
}
/*
* We can't force an explicit inherit on non-inheritable
* properties: if we're asked to exclude such values, we
* remove them from the "recvprops" input nvlist instead.
*/
if (!zfs_prop_user(name) && /* can be inherited too */
!zfs_prop_inheritable(prop) &&
nvlist_exists(recvprops, newname))
fnvlist_remove(recvprops, newname);
else
fnvlist_add_boolean(*oxprops, newname);
break;
case DATA_TYPE_STRING: /* -o property=value */
/*
* we're trying to override a property that does not
* make sense for this type of dataset, but we don't
* want to fail if the receive is recursive: this comes
* in handy when the send stream contains, for
* instance, a child ZVOL and we're trying to receive
* it with "-o atime=on"
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
if (recursive)
continue;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' does not apply to datasets "
"of this type"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
fnvlist_add_string(oprops, newname,
fnvpair_value_string(nvp));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be a string or boolean"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
if (toplevel) {
/* convert override strings properties to native */
if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET,
oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) {
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* zfs_crypto_create() requires the parent name. Get it
* by truncating the fsname copy stored in namebuf.
*/
cp = strrchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
if (!raw && !(!newfs && recursive) &&
zfs_crypto_create(hdl, namebuf, voprops, NULL,
B_FALSE, wkeydata_out, wkeylen_out) != 0) {
fnvlist_free(voprops);
ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto error;
}
/* second pass: process "-o" properties */
fnvlist_merge(*oxprops, voprops);
fnvlist_free(voprops);
} else {
/* override props on child dataset are inherited */
nvp = NULL;
while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
fnvlist_add_boolean(*oxprops, name);
}
}
error:
if (zhp != NULL)
zfs_close(zhp);
if (zpool_hdl != NULL)
zpool_close(zpool_hdl);
fnvlist_free(oprops);
return (ret);
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
*/
static int
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
struct timespec begin_time;
int ioctl_err, ioctl_errno, err;
char *cp;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
char errbuf[ERRBUFLEN];
const char *chopprefix;
boolean_t newfs = B_FALSE;
boolean_t stream_wantsnewfs, stream_resumingnewfs;
boolean_t newprops = B_FALSE;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
uint64_t parent_snapguid = 0;
prop_changelist_t *clp = NULL;
nvlist_t *snapprops_nvlist = NULL;
nvlist_t *snapholds_nvlist = NULL;
zprop_errflags_t prop_errflags;
nvlist_t *prop_errors = NULL;
boolean_t recursive;
const char *snapname = NULL;
char destsnap[MAXPATHLEN * 2];
char origin[MAXNAMELEN] = {0};
char name[MAXPATHLEN];
char tmp_keylocation[MAXNAMELEN] = {0};
nvlist_t *rcvprops = NULL; /* props received from the send stream */
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
nvlist_t *origprops = NULL; /* original props (if destination exists) */
zfs_type_t type = ZFS_TYPE_INVALID;
boolean_t toplevel = B_FALSE;
boolean_t zoned = B_FALSE;
boolean_t hastoken = B_FALSE;
boolean_t redacted;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif
clock_gettime(CLOCK_MONOTONIC_RAW, &begin_time);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
/* Did the user request holds be skipped via zfs recv -k? */
boolean_t holds = flags->holds && !flags->skipholds;
if (stream_avl != NULL) {
const char *keylocation = NULL;
nvlist_t *lookup = NULL;
nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
&snapname);
(void) nvlist_lookup_uint64(fs, "parentfromsnap",
&parent_snapguid);
err = nvlist_lookup_nvlist(fs, "props", &rcvprops);
if (err) {
rcvprops = fnvlist_alloc();
newprops = B_TRUE;
}
/*
* The keylocation property may only be set on encryption roots,
* but this dataset might not become an encryption root until
* recv_fix_encryption_hierarchy() is called. That function
* will fixup the keylocation anyway, so we temporarily unset
* the keylocation for now to avoid any errors from the receive
* ioctl.
*/
err = nvlist_lookup_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (err == 0) {
strlcpy(tmp_keylocation, keylocation, MAXNAMELEN);
(void) nvlist_remove_all(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
}
if (flags->canmountoff) {
fnvlist_add_uint64(rcvprops,
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0);
} else if (newprops) { /* nothing in rcvprops, eliminate it */
fnvlist_free(rcvprops);
rcvprops = NULL;
newprops = B_FALSE;
}
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
snapprops_nvlist = fnvlist_lookup_nvlist(lookup,
snapname);
}
if (holds) {
if (0 == nvlist_lookup_nvlist(fs, "snapholds",
&lookup)) {
snapholds_nvlist = fnvlist_lookup_nvlist(
lookup, snapname);
}
}
}
cp = NULL;
/*
* Determine how much of the snapshot name stored in the stream
* we are going to tack on to the name they specified on the
* command line, and how much we are going to chop off.
*
* If they specified a snapshot, chop the entire name stored in
* the stream.
*/
if (flags->istail) {
/*
* A filesystem was specified with -e. We want to tack on only
* the tail of the sent snapshot path.
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -e"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strrchr(sendfs, '/');
if (chopprefix == NULL) {
/*
* The tail is the poolname, so we need to
* prepend a path separator.
*/
int len = strlen(drrb->drr_toname);
cp = umem_alloc(len + 2, UMEM_NOFAIL);
cp[0] = '/';
(void) strcpy(&cp[1], drrb->drr_toname);
chopprefix = cp;
} else {
chopprefix = drrb->drr_toname + (chopprefix - sendfs);
}
} else if (flags->isprefix) {
/*
* A filesystem was specified with -d. We want to tack on
* everything but the first element of the sent snapshot path
* (all but the pool name).
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -d"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strchr(drrb->drr_toname, '/');
if (chopprefix == NULL)
chopprefix = strchr(drrb->drr_toname, '@');
} else if (strchr(tosnap, '@') == NULL) {
/*
* If a filesystem was specified without -d or -e, we want to
* tack on everything after the fs specified by 'zfs send'.
*/
chopprefix = drrb->drr_toname + strlen(sendfs);
} else {
/* A snapshot was specified as an exact path (no -d or -e). */
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot "
"stream"));
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
}
ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL);
ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) ||
strchr(sendfs, '/') == NULL);
ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
chopprefix[0] == '\0');
/*
* Determine name of destination snapshot.
*/
(void) strlcpy(destsnap, tosnap, sizeof (destsnap));
(void) strlcat(destsnap, chopprefix, sizeof (destsnap));
if (cp != NULL)
umem_free(cp, strlen(cp) + 1);
if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) {
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
/*
* Determine the name of the origin snapshot.
*/
if (originsnap) {
(void) strlcpy(origin, originsnap, sizeof (origin));
if (flags->verbose)
(void) printf("using provided clone origin %s\n",
origin);
} else if (drrb->drr_flags & DRR_FLAG_CLONE) {
if (guid_to_name(hdl, destsnap,
drrb->drr_fromguid, B_FALSE, origin) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local origin for clone %s does not exist"),
destsnap);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
if (flags->verbose)
(void) printf("found clone origin %s\n", origin);
}
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_DEDUP)) {
(void) fprintf(stderr,
gettext("ERROR: \"zfs receive\" no longer supports "
"deduplicated send streams. Use\n"
"the \"zstream redup\" command to convert this stream "
"to a regular,\n"
"non-deduplicated stream.\n"));
err = zfs_error(hdl, EZFS_NOTSUP, errbuf);
goto out;
}
boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RESUMING;
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW;
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA;
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;
stream_resumingnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && resuming;
if (stream_wantsnewfs) {
/*
* if the parent fs does not exist, look for it based on
* the parent snap GUID
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive new filesystem stream"));
(void) strlcpy(name, destsnap, sizeof (name));
cp = strrchr(name, '/');
if (cp)
*cp = '\0';
if (cp &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char suffix[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(suffix, strrchr(destsnap, '/'),
sizeof (suffix));
if (guid_to_name(hdl, name, parent_snapguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strlcat(destsnap, suffix,
sizeof (destsnap));
}
}
} else {
/*
* If the fs does not exist, look for it based on the
* fromsnap GUID.
*/
if (resuming) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive resume stream"));
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive incremental stream"));
}
(void) strlcpy(name, destsnap, sizeof (name));
*strchr(name, '@') = '\0';
/*
* If the exact receive path was specified and this is the
* topmost path in the stream, then if the fs does not exist we
* should look no further.
*/
if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char snap[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(snap, strchr(destsnap, '@'),
sizeof (snap));
if (guid_to_name(hdl, name, drrb->drr_fromguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strlcat(destsnap, snap,
sizeof (destsnap));
}
}
}
(void) strlcpy(name, destsnap, sizeof (name));
*strchr(name, '@') = '\0';
redacted = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_REDACTED;
if (flags->heal) {
if (flags->isprefix || flags->istail || flags->force ||
flags->canmountoff || flags->resumable || flags->nomount ||
flags->skipholds) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective recv can not be used when combined with"
" this flag"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
uint64_t guid =
get_snap_guid(hdl, name, strchr(destsnap, '@') + 1);
if (guid == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective recv must specify an existing snapshot"
" to heal"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
} else if (guid != drrb->drr_toguid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local snapshot doesn't match the snapshot"
" in the provided stream"));
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
} else if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL;
boolean_t encrypted;
(void) strcpy(zc.zc_name, name);
/*
* Destination fs exists. It must be one of these cases:
* - an incremental send stream
* - the stream specifies a new fs (full stream or clone)
* and they want us to blow away the existing fs (and
* have therefore specified -F and removed any snapshots)
* - we are resuming a failed receive.
*/
if (stream_wantsnewfs) {
boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL;
if (!flags->force) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' exists\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (zfs_ioctl(hdl, ZFS_IOC_SNAPSHOT_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has snapshots (eg. %s)\n"
"must destroy them to overwrite it"),
zc.zc_name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume && strrchr(name, '/') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s is the root dataset\n"
"cannot overwrite with a ZVOL"),
name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume &&
zfs_ioctl(hdl, ZFS_IOC_DATASET_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has children (eg. %s)\n"
"cannot overwrite with a ZVOL"),
zc.zc_name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
}
if ((zhp = zfs_open(hdl, name,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
err = -1;
goto out;
}
/*
* Receiving a full/newfs stream onto an existing dataset
* requires the "-F" flag; for the initial receive this is
* enforced by the earlier checks in this function. Similarly,
* resuming a full/newfs receive onto an existing dataset
* requires "-F".
*
* When the dataset doesn't exist, a full/newfs receive creates
* it and marks it INCONSISTENT. When receiving onto an existing
* dataset, however, the receive is first done into %recv and it
* is %recv that is marked INCONSISTENT; the existing dataset is
* not. Resuming a full/newfs receive with a dataset that is not
* INCONSISTENT therefore means we are resuming a newfs receive
* onto an existing dataset, so enforce "-F" here as well.
*/
if (stream_resumingnewfs &&
!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
!flags->force) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Resuming recv on existing destination '%s'\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_RESUME_EXISTS, errbuf);
goto out;
}
if (stream_wantsnewfs &&
zhp->zfs_dmustats.dds_origin[0]) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' is a clone\n"
"must destroy it to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
/*
* Raw sends cannot be performed as an incremental on top
* of existing unencrypted datasets. zfs recv -F can't be
* used to blow away an existing encrypted filesystem. This
* is because it would require the dsl dir to point to the
* new key (or lack of a key) and the old key at the same
* time. The -F flag may still be used for deleting
* intermediate snapshots that would otherwise prevent the
* receive from working.
*/
encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) !=
ZIO_CRYPT_OFF;
if (!stream_wantsnewfs && !encrypted && raw) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot perform raw receive on top of "
"existing unencrypted dataset"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (stream_wantsnewfs && flags->force &&
((raw && !encrypted) || encrypted)) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive -F cannot be used to destroy an "
"encrypted filesystem or overwrite an "
"unencrypted one with an encrypted one"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
(stream_wantsnewfs || stream_resumingnewfs)) {
/* We can't do online recv in this case */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->forceunmount ? MS_FORCE : 0);
if (clp == NULL) {
zfs_close(zhp);
err = -1;
goto out;
}
if (changelist_prefix(clp) != 0) {
changelist_free(clp);
zfs_close(zhp);
err = -1;
goto out;
}
}
/*
* If we are resuming a newfs, set newfs here so that we will
* mount it if the recv succeeds this time. We can tell
* that it was a newfs on the first recv because the fs
* itself will be inconsistent (if the fs existed when we
* did the first recv, we would have received it into
* .../%recv).
*/
if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT))
newfs = B_TRUE;
/* we want to know if we're zoned when validating -o|-x props */
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
/* may need this info later; get it now while we have zhp around */
if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
NULL, NULL, 0, B_TRUE) == 0)
hastoken = B_TRUE;
/* gather existing properties on destination */
origprops = fnvlist_alloc();
fnvlist_merge(origprops, zhp->zfs_props);
fnvlist_merge(origprops, zhp->zfs_user_props);
zfs_close(zhp);
} else {
zfs_handle_t *zhp;
/*
* Destination filesystem does not exist. Therefore we had
* better be creating a new filesystem (either from a full
* backup or a clone). It would be invalid if the user
* specified only the pool name (i.e. if the destination name
* contained no slash character).
*/
cp = strrchr(name, '/');
if (!stream_wantsnewfs || cp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' does not exist"), name);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
/*
* Trim off the final dataset component so we perform the
* recvbackup ioctl on the filesystem's parent.
*/
*cp = '\0';
if (flags->isprefix && !flags->istail && !flags->dryrun &&
create_parents(hdl, destsnap, strlen(tosnap)) != 0) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
/* validate parent */
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
zfs_close(zhp);
goto out;
}
zfs_close(zhp);
newfs = B_TRUE;
*cp = '/';
}
if (flags->verbose) {
(void) printf("%s %s%s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
flags->heal ? " corrective" : "",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);
}
/*
* If this is the top-level dataset, record it so we can use it
* for recursive operations later.
*/
if (top_zfs != NULL &&
(*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) {
toplevel = B_TRUE;
if (*top_zfs == NULL)
*top_zfs = zfs_strdup(hdl, name);
}
if (drrb->drr_type == DMU_OST_ZVOL) {
type = ZFS_TYPE_VOLUME;
} else if (drrb->drr_type == DMU_OST_ZFS) {
type = ZFS_TYPE_FILESYSTEM;
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type: 0x%d"), drrb->drr_type);
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive,
stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops,
&oxprops, &wkeydata, &wkeylen, errbuf)) != 0)
goto out;
/*
* When sending with properties (zfs send -p), the encryption property
* is not included because it is a SETONCE property and therefore
* treated as read only. However, we are always able to determine its
* value because raw sends will include it in the DRR_BEGIN payload
* and non-raw sends with properties are not allowed for encrypted
* datasets. Therefore, if this is a non-raw properties stream, we can
* infer that the value should be ZIO_CRYPT_OFF and manually add that
* to the received properties.
*/
if (stream_wantsnewfs && !raw && rcvprops != NULL &&
!nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) {
if (oxprops == NULL)
oxprops = fnvlist_alloc();
fnvlist_add_uint64(oxprops,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), ZIO_CRYPT_OFF);
}
if (flags->dryrun) {
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
/*
* We have read the DRR_BEGIN record, but we have
* not yet read the payload. For non-dryrun sends
* this will be done by the kernel, so we must
* emulate that here, before attempting to read
* more records.
*/
err = recv_read(hdl, infd, buf, drr->drr_payloadlen,
flags->byteswap, NULL);
free(buf);
if (err != 0)
goto out;
err = recv_skip(hdl, infd, flags->byteswap);
goto out;
}
if (flags->heal) {
err = ioctl_err = lzc_receive_with_heal(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force,
flags->heal, flags->resumable, raw, infd, drr_noswap, -1,
&read_bytes, &errflags, NULL, &prop_errors);
} else {
err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force,
flags->resumable, raw, infd, drr_noswap, -1, &read_bytes,
&errflags, NULL, &prop_errors);
}
ioctl_errno = ioctl_err;
prop_errflags = errflags;
if (err == 0) {
nvpair_t *prop_err = NULL;
while ((prop_err = nvlist_next_nvpair(prop_errors,
prop_err)) != NULL) {
char tbuf[1024];
zfs_prop_t prop;
int intval;
prop = zfs_name_to_prop(nvpair_name(prop_err));
(void) nvpair_value_int32(prop_err, &intval);
if (strcmp(nvpair_name(prop_err),
ZPROP_N_MORE_ERRORS) == 0) {
trunc_prop_errs(intval);
break;
} else if (snapname == NULL || finalsnap == NULL ||
strcmp(finalsnap, snapname) == 0 ||
strcmp(nvpair_name(prop_err),
zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) {
/*
* Skip errors, such as "refquota" failures,
* on intermediate snapshots leading up to a
* final one.
* That's why we have all of the checks above.
*
* See zfs_ioctl.c's extract_delay_props() for
* a list of props which can fail on
* intermediate snapshots, but shouldn't
* affect the overall receive.
*/
(void) snprintf(tbuf, sizeof (tbuf),
dgettext(TEXT_DOMAIN,
"cannot receive %s property on %s"),
nvpair_name(prop_err), name);
zfs_setprop_error(hdl, prop, intval, tbuf);
}
}
}
if (err == 0 && snapprops_nvlist) {
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, destsnap, sizeof (zc.zc_name));
zc.zc_cookie = B_TRUE; /* received */
zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist);
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
if (err == 0 && snapholds_nvlist) {
nvpair_t *pair;
nvlist_t *holds, *errors = NULL;
int cleanup_fd = -1;
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
fnvlist_add_string(holds, destsnap, nvpair_name(pair));
}
(void) lzc_hold(holds, cleanup_fd, &errors);
fnvlist_free(snapholds_nvlist);
fnvlist_free(holds);
}
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
/*
* It may be that this snapshot already exists,
* in which case we want to consume & ignore it
* rather than failing.
*/
avl_tree_t *local_avl;
nvlist_t *local_nv, *fs;
cp = strchr(destsnap, '@');
/*
* XXX Do this faster by just iterating over snaps in
* this fs. Also if zc_value does not exist, we will
* get a strange "does not exist" error message.
*/
*cp = '\0';
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE,
B_TRUE, &local_nv, &local_avl) == 0) {
*cp = '@';
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
if (fs != NULL) {
if (flags->verbose) {
(void) printf("snap %s already exists; "
"ignoring\n", destsnap);
}
err = ioctl_err = recv_skip(hdl, infd,
flags->byteswap);
}
}
*cp = '@';
}
if (ioctl_err != 0) {
switch (ioctl_errno) {
case ENODEV:
cp = strchr(destsnap, '@');
*cp = '\0';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"most recent snapshot of %s does not\n"
"match incremental source"), destsnap);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
*cp = '@';
break;
case ETXTBSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s has been modified\n"
"since most recent snapshot"), name);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
break;
case EACCES:
if (flags->heal) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"key must be loaded to do a non-raw "
"corrective recv on an encrypted "
"dataset."));
} else if (raw && stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create encryption key"));
} else if (raw && !stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key does not match "
"existing key"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"inherited key must be loaded"));
}
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
break;
case EEXIST:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination already exists"));
(void) zfs_error_fmt(hdl, EZFS_EXISTS,
dgettext(TEXT_DOMAIN, "cannot restore to %s"),
destsnap);
*cp = '@';
break;
case EINVAL:
if (embedded && !raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incompatible embedded data stream "
"feature with encrypted receive."));
} else if (flags->resumable) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"kernel modules must be upgraded to "
"receive this stream."));
}
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ECKSUM:
case ZFS_ERR_STREAM_TRUNCATED:
if (flags->heal)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective receive was not able to "
"reconstruct the data needed for "
"healing."));
else
recv_ecksum_set_aux(hdl, destsnap,
flags->resumable, ioctl_err == ECKSUM);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental send stream requires -L "
"(--large-block), to match previous receive."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ENOTSUP:
if (flags->heal)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream is not compatible with the "
"data in the pool."));
else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to receive this "
"stream."));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ZFS_ERR_CRYPTO_NOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream uses crypto parameters not compatible with "
"this pool"));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EDQUOT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s space quota exceeded."), name);
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid missing. See errata %u at "
"https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-ER."),
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid mismatch. See the 'zfs receive' "
"man page section\n discussing the limitations "
"of raw encrypted send streams."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_SPILL_BLOCK_FLAG_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Spill block flag missing for raw send.\n"
"The zfs software on the sending system must "
"be updated."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_RESUME_EXISTS:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Resuming recv on existing dataset without force"));
(void) zfs_error_fmt(hdl, EZFS_RESUME_EXISTS,
dgettext(TEXT_DOMAIN, "cannot resume recv %s"),
destsnap);
*cp = '@';
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive required kernel memory allocation "
"larger than the system can support. Please file "
"an issue at the OpenZFS issue tracker:\n"
"https://github.com/openzfs/zfs/issues/new"));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EBUSY:
if (hastoken) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s contains "
"partially-complete state from "
"\"zfs receive -s\"."), name);
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
}
zfs_fallthrough;
default:
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
}
}
/*
* Mount the target filesystem (if created). Also mount any
* children of the target filesystem if we did a replication
* receive (indicated by stream_avl being non-NULL).
*/
if (clp) {
if (!flags->nomount)
err |= changelist_postfix(clp);
changelist_free(clp);
}
if ((newfs || stream_avl) && type == ZFS_TYPE_FILESYSTEM && !redacted)
flags->domount = B_TRUE;
if (prop_errflags & ZPROP_ERR_NOCLEAR) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to clear unreceived properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (prop_errflags & ZPROP_ERR_NORESTORE) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to restore original properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (err || ioctl_err) {
err = -1;
goto out;
}
if (flags->verbose) {
char buf1[64];
char buf2[64];
uint64_t bytes = read_bytes;
struct timespec delta;
clock_gettime(CLOCK_MONOTONIC_RAW, &delta);
if (begin_time.tv_nsec > delta.tv_nsec) {
delta.tv_nsec =
1000000000 + delta.tv_nsec - begin_time.tv_nsec;
delta.tv_sec -= 1;
} else
delta.tv_nsec -= begin_time.tv_nsec;
delta.tv_sec -= begin_time.tv_sec;
if (delta.tv_sec == 0 && delta.tv_nsec == 0)
delta.tv_nsec = 1;
double delta_f = delta.tv_sec + (delta.tv_nsec / 1e9);
zfs_nicebytes(bytes, buf1, sizeof (buf1));
zfs_nicebytes(bytes / delta_f, buf2, sizeof (buf2));
(void) printf("received %s stream in %.2f seconds (%s/sec)\n",
buf1, delta_f, buf2);
}
err = 0;
out:
if (prop_errors != NULL)
fnvlist_free(prop_errors);
if (tmp_keylocation[0] != '\0') {
fnvlist_add_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation);
}
if (newprops)
fnvlist_free(rcvprops);
fnvlist_free(oxprops);
fnvlist_free(origprops);
return (err);
}
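/*
 * The verbose path above computes the elapsed receive time by hand,
 * borrowing one second whenever the nanosecond field would underflow.
 * A minimal sketch of the same arithmetic as a standalone helper
 * (hypothetical; not part of libzfs):
 */
#if 0
static double
recv_elapsed_seconds(const struct timespec *begin)
{
	struct timespec now;

	(void) clock_gettime(CLOCK_MONOTONIC_RAW, &now);
	if (begin->tv_nsec > now.tv_nsec) {
		now.tv_nsec += 1000000000L;	/* borrow from tv_sec */
		now.tv_sec -= 1;
	}
	return ((now.tv_sec - begin->tv_sec) +
	    (now.tv_nsec - begin->tv_nsec) / 1e9);
}
#endif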
/*
* Check properties we were asked to override (both -o|-x)
*/
static boolean_t
zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props,
const char *errbuf)
{
nvpair_t *nvp = NULL;
zfs_prop_t prop;
const char *name;
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
name = nvpair_name(nvp);
prop = zfs_name_to_prop(name);
if (prop == ZPROP_USERPROP) {
if (!zfs_prop_user(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s: invalid property '%s'"), errbuf, name);
return (B_FALSE);
}
continue;
}
/*
* "origin" is readonly but is used to receive datasets as
* clones so we don't raise an error here
*/
if (prop == ZFS_PROP_ORIGIN)
continue;
/* encryption params have their own verification later */
if (prop == ZFS_PROP_ENCRYPTION ||
zfs_prop_encryption_key_param(prop))
continue;
/*
* cannot override readonly, set-once and other specific
* settable properties
*/
if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION ||
prop == ZFS_PROP_VOLSIZE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s: invalid property '%s'"), errbuf, name);
return (B_FALSE);
}
}
return (B_TRUE);
}
static int
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
int err;
dmu_replay_record_t drr, drr_noswap;
struct drr_begin *drrb = &drr.drr_u.drr_begin;
char errbuf[ERRBUFLEN];
zio_cksum_t zcksum = { { 0 } };
uint64_t featureflags;
int hdrtype;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* check cmdline props, raise an error if they cannot be received */
if (!zfs_receive_checkprops(hdl, cmdprops, errbuf))
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
if (flags->isprefix &&
!zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
"(%s) does not exist"), tosnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
if (originsnap &&
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
"(%s) does not exist"), originsnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* read in the BEGIN record */
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
&zcksum)))
return (err);
if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
/* It's the double end record at the end of a package */
return (ENODATA);
}
/* the kernel needs the non-byteswapped begin record */
drr_noswap = drr;
flags->byteswap = B_FALSE;
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
/*
* We computed the checksum in the wrong byteorder in
* recv_read() above; do it again correctly.
*/
memset(&zcksum, 0, sizeof (zio_cksum_t));
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
flags->byteswap = B_TRUE;
drr.drr_type = BSWAP_32(drr.drr_type);
drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
drrb->drr_type = BSWAP_32(drrb->drr_type);
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
}
if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad magic number)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
if (!DMU_STREAM_SUPPORTED(featureflags) ||
(hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
/*
* Let's be explicit about this one, since rather than
* being a new feature we can't know about, it's an old
* feature we dropped.
*/
if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has deprecated feature: dedup, try "
"'zstream redup [send in a file] | zfs recv "
"[...]'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has unsupported feature, feature flags = "
"%llx (unknown flags = %llx)"),
(u_longlong_t)featureflags,
(u_longlong_t)((featureflags) &
~DMU_BACKUP_FEATURE_MASK));
}
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
/* Holds feature is set once in the compound stream header. */
if (featureflags & DMU_BACKUP_FEATURE_HOLDS)
flags->holds = B_TRUE;
if (strchr(drrb->drr_toname, '@') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad snapshot name)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN];
if (sendfs == NULL) {
/*
* We were not called from zfs_receive_package(). Get
* the fs specified by 'zfs send'.
*/
char *cp;
(void) strlcpy(nonpackage_sendfs,
drr.drr_u.drr_begin.drr_toname,
sizeof (nonpackage_sendfs));
if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
*cp = '\0';
sendfs = nonpackage_sendfs;
VERIFY(finalsnap == NULL);
}
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
finalsnap, cmdprops));
} else {
assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
&zcksum, top_zfs, cmdprops));
}
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
* Return 0 on total success, -2 if some things couldn't be
* destroyed/renamed/promoted, -1 if some things couldn't be received.
* (-1 will override -2; if -1 is returned and the resumable flag was
* specified, the transfer can be resumed if the sending side supports it).
*/
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
char *top_zfs = NULL;
int err;
struct stat sb;
const char *originsnap = NULL;
/*
* The only way fstat can fail is if we do not have a valid file
* descriptor.
*/
if (fstat(infd, &sb) == -1) {
perror("fstat");
return (-2);
}
if (props) {
err = nvlist_lookup_string(props, "origin", &originsnap);
if (err && err != ENOENT)
return (err);
}
err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
stream_avl, &top_zfs, NULL, props);
if (err == 0 && !flags->nomount && flags->domount && top_zfs) {
zfs_handle_t *zhp = NULL;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, top_zfs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
err = -1;
goto out;
} else {
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_MOUNT_ALWAYS,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL) {
err = -1;
goto out;
}
/* mount and share received datasets */
err = changelist_postfix(clp);
changelist_free(clp);
if (err != 0)
err = -1;
}
}
out:
if (top_zfs)
free(top_zfs);
return (err);
}
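/*
 * A minimal sketch of driving zfs_receive() from a caller, assuming an
 * already-open stream on file descriptor 'fd'.  The helper name and the
 * flag choices are illustrative only, not part of this change:
 */
#if 0
static int
example_recv(const char *tosnap, int fd)
{
	libzfs_handle_t *hdl;
	recvflags_t flags;
	int err;

	if ((hdl = libzfs_init()) == NULL)
		return (-1);
	memset(&flags, 0, sizeof (flags));
	flags.verbose = B_TRUE;		/* print transfer statistics */
	flags.resumable = B_TRUE;	/* allow "zfs receive -s" resume */
	err = zfs_receive(hdl, tosnap, NULL, &flags, fd, NULL);
	libzfs_fini(hdl);
	return (err);
}
#endif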
diff --git a/sys/contrib/openzfs/man/Makefile.am b/sys/contrib/openzfs/man/Makefile.am
index 2973520324fd..36c1aede106e 100644
--- a/sys/contrib/openzfs/man/Makefile.am
+++ b/sys/contrib/openzfs/man/Makefile.am
@@ -1,127 +1,135 @@
dist_noinst_man_MANS = \
%D%/man1/cstyle.1
dist_man_MANS = \
%D%/man1/arcstat.1 \
%D%/man1/raidz_test.1 \
%D%/man1/test-runner.1 \
%D%/man1/zhack.1 \
%D%/man1/ztest.1 \
%D%/man1/zvol_wait.1 \
\
%D%/man5/vdev_id.conf.5 \
\
%D%/man4/spl.4 \
%D%/man4/zfs.4 \
\
%D%/man7/dracut.zfs.7 \
%D%/man7/vdevprops.7 \
%D%/man7/zfsconcepts.7 \
%D%/man7/zfsprops.7 \
%D%/man7/zpool-features.7 \
%D%/man7/zpoolconcepts.7 \
%D%/man7/zpoolprops.7 \
\
%D%/man8/fsck.zfs.8 \
%D%/man8/mount.zfs.8 \
%D%/man8/vdev_id.8 \
%D%/man8/zdb.8 \
%D%/man8/zfs.8 \
%D%/man8/zfs-allow.8 \
%D%/man8/zfs-bookmark.8 \
%D%/man8/zfs-change-key.8 \
%D%/man8/zfs-clone.8 \
%D%/man8/zfs-create.8 \
%D%/man8/zfs-destroy.8 \
%D%/man8/zfs-diff.8 \
%D%/man8/zfs-get.8 \
%D%/man8/zfs-groupspace.8 \
%D%/man8/zfs-hold.8 \
%D%/man8/zfs-inherit.8 \
- %D%/man8/zfs-jail.8 \
%D%/man8/zfs-list.8 \
%D%/man8/zfs-load-key.8 \
%D%/man8/zfs-mount.8 \
%D%/man8/zfs-program.8 \
%D%/man8/zfs-project.8 \
%D%/man8/zfs-projectspace.8 \
%D%/man8/zfs-promote.8 \
%D%/man8/zfs-receive.8 \
%D%/man8/zfs-recv.8 \
%D%/man8/zfs-redact.8 \
%D%/man8/zfs-release.8 \
%D%/man8/zfs-rename.8 \
%D%/man8/zfs-rollback.8 \
%D%/man8/zfs-send.8 \
%D%/man8/zfs-set.8 \
%D%/man8/zfs-share.8 \
%D%/man8/zfs-snapshot.8 \
%D%/man8/zfs-unallow.8 \
- %D%/man8/zfs-unjail.8 \
%D%/man8/zfs-unload-key.8 \
%D%/man8/zfs-unmount.8 \
- %D%/man8/zfs-unzone.8 \
%D%/man8/zfs-upgrade.8 \
%D%/man8/zfs-userspace.8 \
%D%/man8/zfs-wait.8 \
- %D%/man8/zfs-zone.8 \
%D%/man8/zfs_ids_to_path.8 \
%D%/man8/zgenhostid.8 \
%D%/man8/zinject.8 \
%D%/man8/zpool.8 \
%D%/man8/zpool-add.8 \
%D%/man8/zpool-attach.8 \
%D%/man8/zpool-checkpoint.8 \
%D%/man8/zpool-clear.8 \
%D%/man8/zpool-create.8 \
%D%/man8/zpool-destroy.8 \
%D%/man8/zpool-detach.8 \
%D%/man8/zpool-events.8 \
%D%/man8/zpool-export.8 \
%D%/man8/zpool-get.8 \
%D%/man8/zpool-history.8 \
%D%/man8/zpool-import.8 \
%D%/man8/zpool-initialize.8 \
%D%/man8/zpool-iostat.8 \
%D%/man8/zpool-labelclear.8 \
%D%/man8/zpool-list.8 \
%D%/man8/zpool-offline.8 \
%D%/man8/zpool-online.8 \
%D%/man8/zpool-reguid.8 \
%D%/man8/zpool-remove.8 \
%D%/man8/zpool-reopen.8 \
%D%/man8/zpool-replace.8 \
%D%/man8/zpool-resilver.8 \
%D%/man8/zpool-scrub.8 \
%D%/man8/zpool-set.8 \
%D%/man8/zpool-split.8 \
%D%/man8/zpool-status.8 \
%D%/man8/zpool-sync.8 \
%D%/man8/zpool-trim.8 \
%D%/man8/zpool-upgrade.8 \
%D%/man8/zpool-wait.8 \
%D%/man8/zstream.8 \
%D%/man8/zstreamdump.8 \
%D%/man8/zpool_influxdb.8
+if BUILD_FREEBSD
+dist_man_MANS += \
+ %D%/man8/zfs-jail.8 \
+ %D%/man8/zfs-unjail.8
+endif
+
+if BUILD_LINUX
+dist_man_MANS += \
+ %D%/man8/zfs-unzone.8 \
+ %D%/man8/zfs-zone.8
+endif
+
nodist_man_MANS = \
%D%/man8/zed.8 \
%D%/man8/zfs-mount-generator.8
dist_noinst_DATA += $(dist_noinst_man_MANS) $(dist_man_MANS)
SUBSTFILES += $(nodist_man_MANS)
CHECKS += mancheck
mancheck:
$(top_srcdir)/scripts/mancheck.sh $(srcdir)/%D%
if BUILD_LINUX
# The manual pager in most Linux distros defaults to "BSD" when .Os is blank,
# but leaving it blank makes things a lot easier on
# FreeBSD when OpenZFS is vendored in the base system.
INSTALL_DATA_HOOKS += man-install-data-hook
man-install-data-hook:
cd $(DESTDIR)$(mandir) && $(SED) $(ac_inplace) 's/^\.Os$$/.Os OpenZFS/' $(subst %D%/,,$(dist_man_MANS) $(nodist_man_MANS))
endif
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index 271b02b6ee42..3843419731b8 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -1,2585 +1,2587 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
-.Dd January 10, 2023
+.Dd July 21, 2023
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the dbuf cache.
The target size is the lesser of this value and
.No 1/2^ Ns Sy dbuf_cache_shift Pq 1/32nd
of the target ARC size.
The behavior of the dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_metadata_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the metadata dbuf cache.
The target size is the lesser of this value and
.No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
of the target ARC size.
The behavior of the metadata dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_cache_hiwater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage over
.Sy dbuf_cache_max_bytes
when dbufs must be evicted directly.
.
.It Sy dbuf_cache_lowater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage below
.Sy dbuf_cache_max_bytes
when the evict thread stops evicting dbufs.
.
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq uint
Set the size of the dbuf cache
.Pq Sy dbuf_cache_max_bytes
to a log2 fraction of the target ARC size.
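.Pp
For illustration only, the log2 fraction amounts to a simple shift of the
ARC target size (before the cap above); the helper below is hypothetical:
.Bd -literal -compact
static uint64_t
dbuf_cache_target(uint64_t arc_c_target, int shift)
{
	return (arc_c_target >> shift);	/* shift 5 => 1/32nd */
}
.Ed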
.
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq uint
Set the size of the dbuf metadata cache
.Pq Sy dbuf_metadata_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_mutex_cache_shift Ns = Ns Sy 0 Pq uint
Set the size of the mutex array for the dbuf cache.
When set to
.Sy 0
the array is dynamically sized based on total system memory.
.
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq uint
Number of dnode slots allocated in a single operation, as a power of 2.
The default value minimizes lock contention for the bulk operation performed.
.
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Limit the amount we can prefetch with one call to this amount in bytes.
This helps to limit the amount of memory that can be used by prefetching.
.
.It Sy ignore_hole_birth Pq int
Alias for
.Sy send_holes_without_birth_time .
.
.It Sy l2arc_feed_again Ns = Ns Sy 1 Ns | Ns 0 Pq int
Turbo L2ARC warm-up.
When the L2ARC is cold the fill interval will be set as fast as possible.
.
.It Sy l2arc_feed_min_ms Ns = Ns Sy 200 Pq u64
Min feed interval in milliseconds.
Requires
.Sy l2arc_feed_again Ns = Ns Ar 1
and only applicable in related situations.
.
.It Sy l2arc_feed_secs Ns = Ns Sy 1 Pq u64
Seconds between L2ARC writing.
.
.It Sy l2arc_headroom Ns = Ns Sy 2 Pq u64
How far through the ARC lists to search for L2ARC cacheable content,
expressed as a multiplier of
.Sy l2arc_write_max .
ARC persistence across reboots can be achieved with persistent L2ARC
by setting this parameter to
.Sy 0 ,
allowing the full length of ARC lists to be searched for cacheable content.
.
.It Sy l2arc_headroom_boost Ns = Ns Sy 200 Ns % Pq u64
Scales
.Sy l2arc_headroom
by this percentage when L2ARC contents are being successfully compressed
before writing.
A value of
.Sy 100
disables this feature.
.
.It Sy l2arc_exclude_special Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether buffers present on special vdevs are eligible for caching
into L2ARC.
If set to 1, exclude dbufs on special vdevs from being cached to L2ARC.
.
.It Sy l2arc_mfuonly Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether only MFU metadata and data are cached from ARC into L2ARC.
This may be desired to avoid wasting space on L2ARC when reading/writing large
amounts of data that are not expected to be accessed more than once.
.Pp
The default is off,
meaning both MRU and MFU data and metadata are cached.
When turning off this feature, some MRU buffers will still be present
in ARC and eventually cached on L2ARC.
.No If Sy l2arc_noprefetch Ns = Ns Sy 0 ,
some prefetched buffers will be cached to L2ARC, and those might later
transition to MRU, in which case the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
Regardless of
.Sy l2arc_noprefetch ,
some MFU buffers might be evicted from ARC,
accessed later on as prefetches and transition to MRU as prefetches.
If accessed again they are counted as MRU and the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
The ARC status of L2ARC buffers when they were first cached in
L2ARC can be seen in the
.Sy l2arc_mru_asize , Sy l2arc_mfu_asize , No and Sy l2arc_prefetch_asize
arcstats when importing the pool or onlining a cache
device if persistent L2ARC is enabled.
.Pp
The
.Sy evict_l2_eligible_mru
arcstat does not take into account if this option is enabled as the information
provided by the
.Sy evict_l2_eligible_m[rf]u
arcstats can be used to decide if toggling this option is appropriate
for the current workload.
.
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq uint
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure,
too many headers on a system with an irrationally large L2ARC
can render it slow or unusable.
This parameter limits L2ARC writes and rebuilds to achieve the target.
.
.It Sy l2arc_trim_ahead Ns = Ns Sy 0 Ns % Pq u64
Trims ahead of the current write size
.Pq Sy l2arc_write_max
on L2ARC devices by this percentage of write size if we have filled the device.
If set to
.Sy 100
we TRIM twice the space required to accommodate upcoming writes.
A minimum of
.Sy 64 MiB
will be trimmed.
It also enables TRIM of the whole L2ARC device upon creation
or addition to an existing pool or if the header of the device is
invalid upon importing a pool or onlining a cache device.
A value of
.Sy 0
disables TRIM on L2ARC altogether and is the default as it can put significant
stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
.
.It Sy l2arc_noprefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Do not write buffers to L2ARC if they were prefetched but not used by
applications.
In case there are prefetched buffers in L2ARC and this option
is later set, we do not read the prefetched buffers from L2ARC.
Unsetting this option is useful for caching sequential reads from the
disks to L2ARC and serving those reads from L2ARC later on.
This may be beneficial in case the L2ARC device is significantly faster
in sequential reads than the disks of the pool.
.Pp
Use
.Sy 1
to disable and
.Sy 0
to enable caching/reading prefetches to/from L2ARC.
.
.It Sy l2arc_norw Ns = Ns Sy 0 Ns | Ns 1 Pq int
No reads during writes.
.
.It Sy l2arc_write_boost Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq u64
Cold L2ARC devices will have
.Sy l2arc_write_max
increased by this amount while they remain cold.
.
.It Sy l2arc_write_max Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq u64
Max write bytes per interval.
.
.It Sy l2arc_rebuild_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Rebuild the L2ARC when importing a pool (persistent L2ARC).
This can be disabled if there are problems importing a pool
or attaching an L2ARC device (e.g. the L2ARC device is slow
in reading stored log metadata, or the metadata
has become somehow fragmented/unusable).
.
.It Sy l2arc_rebuild_blocks_min_l2size Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Minimum size of an L2ARC device required in order to write log blocks in it.
The log blocks are used upon importing the pool to rebuild the persistent L2ARC.
.Pp
For L2ARC devices less than 1 GiB, the amount of data
.Fn l2arc_evict
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Metaslab granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data to each disk
before moving on to the next top-level vdev.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq u64
Make some blocks above a certain size be gang blocks.
This option is used by the test suite to facilitate testing.
.
+.It Sy metaslab_force_ganging_pct Ns = Ns Sy 3 Ns % Pq uint
+For blocks that could be forced to be a gang block (due to
+.Sy metaslab_force_ganging ) ,
+force this many of them to be gang blocks.
+.
.It Sy zfs_ddt_zap_default_bs Ns = Ns Sy 15 Po 32 KiB Pc Pq int
Default DDT ZAP data block size as a power of 2. Note that changing this after
creating a DDT on the pool will not affect existing DDTs, only newly created
ones.
.
.It Sy zfs_ddt_zap_default_ibs Ns = Ns Sy 15 Po 32 KiB Pc Pq int
Default DDT ZAP indirect block size as a power of 2. Note that changing this
after creating a DDT on the pool will not affect existing DDTs, only newly
created ones.
.
.It Sy zfs_default_bs Ns = Ns Sy 9 Po 512 B Pc Pq int
Default dnode block size as a power of 2.
.
.It Sy zfs_default_ibs Ns = Ns Sy 17 Po 128 KiB Pc Pq int
Default dnode indirect block size as a power of 2.
.
.It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
When attempting to log an output nvlist of an ioctl in the on-disk history,
the output will not be stored if it is larger than this size (in bytes).
This must be less than
.Sy DMU_MAX_ACCESS Pq 64 MiB .
This applies primarily to
.Fn zfs_ioc_channel_program Pq cf. Xr zfs-program 8 .
.
.It Sy zfs_keep_log_spacemaps_at_export Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent log spacemaps from being destroyed during pool exports and destroys.
.
.It Sy zfs_metaslab_segment_weight_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable segment-based metaslab selection.
.
.It Sy zfs_metaslab_switch_threshold Ns = Ns Sy 2 Pq int
When using segment-based metaslab selection, continue allocating
from the active metaslab until this option's
worth of buckets have been exhausted.
.
.It Sy metaslab_debug_load Ns = Ns Sy 0 Ns | Ns 1 Pq int
Load all metaslabs during pool import.
.
.It Sy metaslab_debug_unload Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent metaslabs from being unloaded.
.
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable use of the fragmentation metric in computing metaslab weights.
.
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
Maximum distance to search forward from the last offset.
Without this limit, fragmented pools can see
.Em >100`000
iterations and
.Fn metaslab_block_picker
becomes the performance limiting factor on high-performance storage.
.Pp
With the default setting of
.Sy 16 MiB ,
we typically see less than
.Em 500
iterations, even with very fragmented
.Sy ashift Ns = Ns Sy 9
pools.
The maximum number of iterations possible is
.Sy metaslab_df_max_search / 2^(ashift+1) .
With the default setting of
.Sy 16 MiB
this is
.Em 16*1024 Pq with Sy ashift Ns = Ns Sy 9
or
.Em 2*1024 Pq with Sy ashift Ns = Ns Sy 12 .
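.Pp
For illustration, this bound reduces to a single shift; a hypothetical
helper reproducing the figures above:
.Bd -literal -compact
static uint64_t
max_df_iterations(uint64_t max_search, uint64_t ashift)
{
	/* 16 MiB with ashift 9: 16777216 >> 10 == 16384 (16*1024) */
	return (max_search >> (ashift + 1));
}
.Ed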
.
.It Sy metaslab_df_use_largest_segment Ns = Ns Sy 0 Ns | Ns 1 Pq int
If not searching forward (due to
.Sy metaslab_df_max_search , metaslab_df_free_pct ,
.No or Sy metaslab_df_alloc_threshold ) ,
this tunable controls which segment is used.
If set, we will use the largest free segment.
If unset, we will use a segment of at least the requested size.
.
.It Sy zfs_metaslab_max_size_cache_sec Ns = Ns Sy 3600 Ns s Po 1 hour Pc Pq u64
When we unload a metaslab, we cache the size of the largest free chunk.
We use that cached size to determine whether or not to load a metaslab
for a given allocation.
As more frees accumulate in that metaslab while it's unloaded,
the cached max size becomes less and less accurate.
After a number of seconds controlled by this tunable,
we stop considering the cached max size and start
considering only the histogram instead.
.
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq uint
When we are loading a new metaslab, we check the amount of memory being used
to store metaslab range trees.
If it is over a threshold, we attempt to unload the least recently used metaslab
to prevent the system from clogging all of its memory with range trees.
This tunable sets the percentage of total system memory that is the threshold.
.
.It Sy zfs_metaslab_try_hard_before_gang Ns = Ns Sy 0 Ns | Ns 1 Pq int
.Bl -item -compact
.It
If unset, we will first try normal allocation.
.It
If that fails then we will do a gang allocation.
.It
If that fails then we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.Pp
.Bl -item -compact
.It
If set, we will first try normal allocation.
.It
If that fails then we will do a "try hard" allocation.
.It
If that fails we will do a gang allocation.
.It
If that fails we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq uint
When not trying hard, we only consider this number of the best metaslabs.
This improves performance, especially when there are many metaslabs per vdev
and the allocation can't actually be satisfied
(so we would otherwise iterate all metaslabs).
.
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq uint
When a vdev is added, target this number of metaslabs per top-level vdev.
.
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint
Default lower limit for metaslab size.
.
.It Sy zfs_vdev_max_ms_shift Ns = Ns Sy 34 Po 16 GiB Pc Pq uint
Default upper limit for metaslab size.
.
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq uint
Maximum ashift used when optimizing for logical \[->] physical sector size on
new
top-level vdevs.
May be increased up to
.Sy ASHIFT_MAX Po 16 Pc ,
but this may negatively impact pool space efficiency.
.
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq uint
Minimum ashift used when creating new top-level vdevs.
.
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq uint
Minimum number of metaslabs to create in a top-level vdev.
.
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
Skip label validation steps during pool import.
Changing is not recommended unless you know what you're doing
and are recovering a damaged label.
.
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq uint
Practical upper limit of total metaslabs per top-level vdev.
.
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group preloading.
.
.It Sy metaslab_lba_weighting_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Give more weight to metaslabs with lower LBAs,
assuming they have greater bandwidth,
as is typically the case on a modern constant angular velocity disk drive.
.
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq uint
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
reduce unnecessary reloading.
Note that both this many TXGs and
.Sy metaslab_unload_delay_ms
milliseconds must pass before unloading will occur.
.
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq uint
After a metaslab is used, we keep it loaded for this many milliseconds,
to attempt to reduce unnecessary reloading.
Note that both this many milliseconds and
.Sy metaslab_unload_delay
TXGs must pass before unloading will occur.
.
.It Sy reference_history Ns = Ns Sy 3 Pq uint
Maximum reference holders being tracked when
.Sy reference_tracking_enable
is active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
.Sy refcount_t
objects (debug builds only).
.
.It Sy send_holes_without_birth_time Ns = Ns Sy 1 Ns | Ns 0 Pq int
When set, the
.Sy hole_birth
optimization will not be used, and all holes will always be sent during a
.Nm zfs Cm send .
This is useful if you suspect your datasets are affected by a bug in
.Sy hole_birth .
.
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
SPA config file.
.
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq uint
Multiplication factor used to estimate actual disk consumption from the
size of data being written.
The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its configuration.
Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor,
particularly if they operate close to quota or capacity limits.
.
.It Sy spa_load_print_vdev_tree Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to print the vdev tree in the debugging message buffer during pool
import.
.
.It Sy spa_load_verify_data Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse data blocks during an "extreme rewind"
.Pq Fl X
import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal skips non-metadata blocks.
It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.
.It Sy spa_load_verify_metadata Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse blocks during an "extreme rewind"
.Pq Fl X
pool import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal is not performed.
It can be toggled once the import has started to stop or start the traversal.
.
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq uint
Sets the maximum number of bytes to consume during pool import to the log2
fraction of the target ARC size.
.
.It Sy spa_slop_shift Ns = Ns Sy 5 Po 1/32nd Pc Pq int
Normally, we don't allow the last
.Sy 3.2% Pq Sy 1/2^spa_slop_shift
of space in the pool to be consumed.
This ensures that we don't run the pool completely out of space,
due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space.
If we have less than this amount of free space,
most ZPL operations (e.g. write, create) will return
.Sy ENOSPC .
.
.It Sy spa_upgrade_errlog_limit Ns = Ns Sy 0 Pq uint
Limits the number of on-disk error log entries that will be converted to the
new format when enabling the
.Sy head_errlog
feature.
The default is to convert all log entries.
.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space, in bytes,
which will be included as "unnecessary" data in a chunk of copied data.
.Pp
The default value here was chosen to align with
.Sy zfs_vdev_read_gap_limit ,
which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.
.It Sy vdev_file_logical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Logical ashift for file-based devices.
.
.It Sy vdev_file_physical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Physical ashift for file-based devices.
.
.It Sy zap_iterate_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, when we start iterating over a ZAP object,
prefetch the entire object (all leaf blocks).
However, this is limited by
.Sy dmu_prefetch_max .
.
.It Sy zap_micro_max_size Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
Maximum micro ZAP size.
A micro ZAP is upgraded to a fat ZAP, once it grows beyond the specified size.
.
-.It Sy zfetch_array_rd_sz Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
-If prefetching is enabled, disable prefetching for reads larger than this size.
-.
.It Sy zfetch_min_distance Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Min bytes to prefetch per stream.
Prefetch distance starts from the demand access size and quickly grows to
this value, doubling on each hit.
After that it may grow further by 1/8 per hit, but only if some prefetches
issued since the last growth have not completed in time to satisfy the demand
request, i.e. the prefetch depth did not cover the read latency or the pool
became saturated.
.
.It Sy zfetch_max_distance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch per stream.
.
.It Sy zfetch_max_idistance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch indirects for per stream.
.
.It Sy zfetch_max_streams Ns = Ns Sy 8 Pq uint
Max number of streams per zfetch (prefetch streams per file).
.
.It Sy zfetch_min_sec_reap Ns = Ns Sy 1 Pq uint
Min time before an inactive prefetch stream can be reclaimed.
.
.It Sy zfetch_max_sec_reap Ns = Ns Sy 2 Pq uint
Max time before an inactive prefetch stream can be deleted.
.
.It Sy zfs_abd_scatter_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enables the ARC to use scatter/gather lists; when disabled, all allocations
are forced to be linear in kernel memory.
Disabling can improve performance in some code paths
at the expense of fragmented kernel memory.
.
.It Sy zfs_abd_scatter_max_order Ns = Ns Sy MAX_ORDER\-1 Pq uint
Maximum number of consecutive memory pages allocated in a single block for
scatter/gather lists.
.Pp
The value of
.Sy MAX_ORDER
depends on kernel configuration.
.
.It Sy zfs_abd_scatter_min_size Ns = Ns Sy 1536 Ns B Po 1.5 KiB Pc Pq uint
This is the minimum allocation size that will use scatter (page-based) ABDs.
Smaller allocations will use linear ABDs.
.
.It Sy zfs_arc_dnode_limit Ns = Ns Sy 0 Ns B Pq u64
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata.
This value acts as a ceiling to the amount of dnode metadata, and defaults to
.Sy 0 ,
which indicates that a percentage of the ARC meta buffers, given by
.Sy zfs_arc_dnode_limit_percent ,
may be used for dnodes.
.
.It Sy zfs_arc_dnode_limit_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage that can be consumed by dnodes of ARC meta buffers.
.Pp
See also
.Sy zfs_arc_dnode_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_dnode_reduce_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds
.Sy zfs_arc_dnode_limit .
.
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq uint
The ARC's buffer hash table is sized based on the assumption of an average
block size of this value.
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
with 8-byte pointers.
For configurations with a known larger average block size,
this value can be increased to reduce the memory footprint.
.
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq uint
When
.Fn arc_is_overflowing ,
.Fn arc_get_data_impl
waits for this percent of the requested amount of data to be evicted.
For example, by default, for every
.Em 2 KiB
that's evicted,
.Em 1 KiB
of it may be "reused" by a new allocation.
Since this is above
.Sy 100 Ns % ,
it ensures that progress is made towards getting
.Sy arc_size No under Sy arc_c .
Since this is finite, it ensures that allocations can still happen,
even during the potentially long time that
.Sy arc_size No is more than Sy arc_c .
.
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq uint
Number of ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq uint
If set to a non-zero value, it will replace the
.Sy arc_grow_retry
value with this value.
The
.Sy arc_grow_retry
.No value Pq default Sy 5 Ns s
is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.
.It Sy zfs_arc_lotsfree_percent Ns = Ns Sy 10 Ns % Pq int
Throttle I/O when free system memory drops below this percentage of total
system memory.
Setting this value to
.Sy 0
will disable the throttle.
.
.It Sy zfs_arc_max Ns = Ns Sy 0 Ns B Pq u64
Max size of ARC in bytes.
If
.Sy 0 ,
then the max size of ARC is determined by the amount of system memory installed.
Under Linux, half of system memory will be used as the limit.
Under
.Fx ,
the larger of
.Sy all_system_memory No \- Sy 1 GiB
and
.Sy 5/8 No \(mu Sy all_system_memory
will be used as the limit.
This value must be at least
.Sy 67108864 Ns B Pq 64 MiB .
.Pp
This value can be changed dynamically, with some caveats.
It cannot be set back to
.Sy 0
while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
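.Pp
As an illustration only of changing this limit at runtime on
.Fx ,
the helper below is hypothetical and the 4 GiB figure is an example:
.Bd -literal -compact
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_arc_max_4g(void)
{
	uint64_t limit = 4ULL << 30;	/* 4 GiB, above the 64 MiB floor */

	return (sysctlbyname("vfs.zfs.arc_max", NULL, NULL,
	    &limit, sizeof (limit)));
}
.Ed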
.
.It Sy zfs_arc_meta_balance Ns = Ns Sy 500 Pq uint
Balance between metadata and data on ghost hits.
Values above 100 increase metadata caching by proportionally reducing effect
of ghost data hits on target data/metadata rate.
.
.It Sy zfs_arc_min Ns = Ns Sy 0 Ns B Pq u64
Min size of ARC in bytes.
.No If set to Sy 0 , arc_c_min
will default to consuming the larger of
.Sy 32 MiB
and
.Sy all_system_memory No / Sy 32 .
.
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq uint
Minimum time prefetched blocks are locked in the ARC.
.
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq uint
Minimum time "prescient prefetched" blocks are locked in the ARC.
These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them.
.
.It Sy zfs_arc_prune_task_threads Ns = Ns Sy 1 Pq int
Number of arc_prune threads.
.Fx
does not need more than one.
Linux may theoretically use one per mount point up to number of CPUs,
but that was not proven to be useful.
.
.It Sy zfs_max_missing_tvds Ns = Ns Sy 0 Pq int
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.
.It Sy zfs_max_nvlist_src_size Ns = Ns Sy 0 Pq u64
Maximum size in bytes allowed to be passed as
.Sy zc_nvlist_src_size
for ioctls on
.Pa /dev/zfs .
This prevents a user from causing the kernel to allocate
an excessive amount of memory.
When the limit is exceeded, the ioctl fails with
.Sy EINVAL
and a description of the error is sent to the
.Pa zfs-dbgmsg
log.
This parameter should not need to be touched under normal circumstances.
If
.Sy 0 ,
equivalent to a quarter of the user-wired memory limit under
.Fx
and to
.Sy 134217728 Ns B Pq 128 MiB
under Linux.
.
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq uint
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and metadata objects.
Locking is performed at the level of these "sub-lists".
This parameter controls the number of sub-lists per ARC state,
and also applies to other uses of the multilist data structure.
.Pp
If
.Sy 0 ,
equivalent to the greater of the number of online CPUs and
.Sy 4 .
.
.It Sy zfs_arc_overflow_shift Ns = Ns Sy 8 Pq int
The ARC size is considered to be overflowing if it exceeds the current
ARC target size
.Pq Sy arc_c
by thresholds determined by this parameter.
Exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No / Sy 2
starts ARC reclamation process.
If that appears insufficient, exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No \(mu Sy 1.5
blocks new buffer allocation until the reclaim thread catches up.
Once started, the reclamation process continues until the ARC size returns
below the target size.
.Pp
The default value of
.Sy 8
causes the ARC to start reclamation if it exceeds the target size by
.Em 0.2%
of the target size, and block allocations by
.Em 0.6% .
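.Pp
A minimal sketch of the two thresholds described above (hypothetical
helper, not part of the module):
.Bd -literal -compact
static void
arc_overflow_thresholds(uint64_t arc_c, int shift,
    uint64_t *start_reclaim, uint64_t *block_alloc)
{
	*start_reclaim = (arc_c >> shift) / 2;		/* ~0.2% at shift 8 */
	*block_alloc = (arc_c >> shift) * 3 / 2;	/* ~0.6% at shift 8 */
}
.Ed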
.
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq uint
If nonzero, this will update
.Sy arc_shrink_shift Pq default Sy 7
with the new value.
.
.It Sy zfs_arc_pc_percent Ns = Ns Sy 0 Ns % Po off Pc Pq uint
Percent of pagecache to reclaim ARC to.
.Pp
This tunable allows the ZFS ARC to play more nicely
with the kernel's LRU pagecache.
It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
.Sy zfs_arc_min
if necessary.
This value is specified as percent of pagecache size (as measured by
.Sy NR_FILE_PAGES ) ,
where that percent may exceed
.Sy 100 .
This
only operates during memory pressure/reclaim.
.
.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 10000 Pq int
This is a limit on how many pages the ARC shrinker makes available for
eviction in response to one page allocation attempt.
Note that in practice, the kernel's shrinker can ask us to evict
up to about four times this for one allocation attempt.
.Pp
The default limit of
.Sy 10000 Pq in practice, Em 160 MiB No per allocation attempt with 4 KiB pages
limits the amount of time spent attempting to reclaim ARC memory to
less than 100 ms per allocation attempt,
even with a small average compressed block size of ~8 KiB.
.Pp
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq u64
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable pool import at module load by ignoring the cache file
.Pq Sy spa_config_path .
.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
(currently 10 checksums over 10 seconds)
or else the daemon may not trigger any action.
.
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq uint
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage.
The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.
.It Sy zfs_condense_indirect_commit_entry_delay_ms Ns = Ns Sy 0 Ns ms Pq int
Vdev indirection layer (used for device removal) sleeps for this many
milliseconds during mapping generation.
Intended for use with the test suite to throttle vdev removal speed.
.
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq uint
Minimum percent of obsolete bytes in vdev mapping required to attempt to
condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
Intended for use with the test suite
to facilitate triggering condensing as needed.
.
.It Sy zfs_condense_indirect_vdevs_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable condensing indirect vdev mappings.
When set, attempt to condense indirect vdev mappings
if the mapping uses more than
.Sy zfs_condense_min_mapping_bytes
bytes of memory and if the obsolete space map object uses more than
.Sy zfs_condense_max_obsolete_bytes
bytes on-disk.
The condensing process is an attempt to save memory by removing obsolete
mappings.
.
.It Sy zfs_condense_max_obsolete_bytes Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_condense_min_mapping_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq u64
Minimum size vdev mapping to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_dbgmsg_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Internally ZFS keeps a small log to facilitate debugging.
The log is enabled by default, and can be disabled by unsetting this option.
The contents of the log can be accessed by reading
.Pa /proc/spl/kstat/zfs/dbgmsg .
Writing
.Sy 0
to the file clears the log.
.Pp
This setting does not influence debug prints due to
.Sy zfs_flags .
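.Pp
For illustration, the log can be read like any other file; this
hypothetical snippet dumps it on Linux:
.Bd -literal -compact
#include <stdio.h>

static int
dump_zfs_dbgmsg(void)
{
	char line[1024];
	FILE *fp = fopen("/proc/spl/kstat/zfs/dbgmsg", "r");

	if (fp == NULL)
		return (-1);
	while (fgets(line, sizeof (line), fp) != NULL)
		fputs(line, stdout);
	return (fclose(fp));
}
.Ed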
.
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Maximum size of the internal ZFS debug log.
.
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
Historically used for controlling what reporting was available under
.Pa /proc/spl/kstat/zfs .
No effect.
.
.It Sy zfs_deadman_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
When a pool sync operation takes longer than
.Sy zfs_deadman_synctime_ms ,
or when an individual I/O operation takes longer than
.Sy zfs_deadman_ziotime_ms ,
then the operation is considered to be "hung".
If
.Sy zfs_deadman_enabled
is set, then the deadman behavior is invoked as described by
.Sy zfs_deadman_failmode .
By default, the deadman is enabled and set to
.Sy wait
which results in "hung" I/O operations only being logged.
The deadman is automatically disabled when a pool gets suspended.
.
.It Sy zfs_deadman_failmode Ns = Ns Sy wait Pq charp
Controls the failure behavior when the deadman detects a "hung" I/O operation.
Valid values are:
.Bl -tag -compact -offset 4n -width "continue"
.It Sy wait
Wait for a "hung" operation to complete.
For each "hung" operation a "deadman" event will be posted
describing that operation.
.It Sy continue
Attempt to recover from a "hung" operation by re-dispatching it
to the I/O pipeline if possible.
.It Sy panic
Panic the system.
This can be used to facilitate automatic fail-over
to a properly configured fail-over partner.
.El
.
.It Sy zfs_deadman_checktime_ms Ns = Ns Sy 60000 Ns ms Po 1 min Pc Pq u64
Check time in milliseconds.
This defines the frequency at which we check for hung I/O requests
and potentially invoke the
.Sy zfs_deadman_failmode
behavior.
.
.It Sy zfs_deadman_synctime_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the pool sync completes.
.
.It Sy zfs_deadman_ziotime_ms Ns = Ns Sy 300000 Ns ms Po 5 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung".
As long as the operation remains "hung",
the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the operation completes.
.
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable prefetching dedup-ed blocks which are going to be freed.
.
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of
.Sy zfs_dirty_data_max .
This value should be at least
.Sy zfs_vdev_async_write_active_max_dirty_percent .
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_delay_scale Ns = Ns Sy 500000 Pq int
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.Pp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second.
This will smoothly handle between ten times and a tenth of this number.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
.Sy zfs_delay_scale No \(mu Sy zfs_dirty_data_max Em must No be smaller than Sy 2^64 .
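.Pp
A sketch of the sizing rule above (hypothetical helper; a target of
2000 peak operations per second yields 500000, the default):
.Bd -literal -compact
static uint64_t
delay_scale_for(uint64_t max_ops_per_sec)
{
	return (1000000000ULL / max_ops_per_sec);
}
.Ed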
.
.It Sy zfs_disable_ivset_guid_check Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disables requirement for IVset GUIDs to be present and match when doing a raw
receive of encrypted datasets.
Intended for users whose pools were created with
OpenZFS pre-release versions and now have compatibility issues.
.
.It Sy zfs_key_max_salt_uses Ns = Ns Sy 400000000 Po 4*10^8 Pc Pq ulong
Maximum number of uses of a single salt value before generating a new one for
encrypted datasets.
The default value is also the maximum.
.
.It Sy zfs_object_mutex_size Ns = Ns Sy 64 Pq uint
Size of the znode hashtable used for holds.
.Pp
Due to the need to hold locks on objects that may not exist yet, kernel mutexes
are not created per-object and instead a hashtable is used where collisions
will result in objects waiting when there is not actually contention on the
same object.
.
.It Sy zfs_slow_io_events_per_second Ns = Ns Sy 20 Ns /s Pq int
Rate limit delay and deadman zevents (which report slow I/O operations) to this
many per
second.
.
.It Sy zfs_unflushed_max_mem_amt Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Upper-bound limit for unflushed metadata changes to be held by the
log spacemap in memory, in bytes.
.
.It Sy zfs_unflushed_max_mem_ppm Ns = Ns Sy 1000 Ns ppm Po 0.1% Pc Pq u64
Part of overall system memory that ZFS allows to be used
for unflushed metadata changes by the log spacemap, in millionths.
.
.It Sy zfs_unflushed_log_block_max Ns = Ns Sy 131072 Po 128k Pc Pq u64
Describes the maximum number of log spacemap blocks allowed for each pool.
The default value means that the space in all the log spacemaps
can add up to no more than
.Sy 131072
blocks (which means
.Em 16 GiB
of logical space before compression and ditto blocks,
assuming that blocksize is
.Em 128 KiB ) .
.Pp
This tunable is important because it involves a trade-off between import
time after an unclean export and the frequency of flushing metaslabs.
The higher this number is, the more log blocks we allow when the pool is
active which means that we flush metaslabs less often and thus decrease
the number of I/O operations for spacemap updates per TXG.
At the same time though, that means that in the event of an unclean export,
there will be more log spacemap blocks for us to read, inducing overhead
in the import time of the pool.
The lower the number, the more the flushing increases, destroying log
blocks quicker as they become obsolete faster, which leaves fewer blocks
to be read during import time after a crash.
.Pp
Each log spacemap block existing during pool import leads to approximately
one extra logical I/O issued.
This is the reason why this tunable is exposed in terms of blocks rather
than space used.
.
.It Sy zfs_unflushed_log_block_min Ns = Ns Sy 1000 Pq u64
If the number of metaslabs is small and our incoming rate is high,
we could get into a situation that we are flushing all our metaslabs every TXG.
Thus we always allow at least this many log blocks.
.
.It Sy zfs_unflushed_log_block_pct Ns = Ns Sy 400 Ns % Pq u64
Tunable used to determine the number of blocks that can be used for
the spacemap log, expressed as a percentage of the total number of
unflushed metaslabs in the pool.
.
.It Sy zfs_unflushed_log_txg_max Ns = Ns Sy 1000 Pq u64
Tunable limiting maximum time in TXGs any metaslab may remain unflushed.
It effectively limits the maximum number of unflushed per-TXG spacemap logs
that need to be read after unclean pool export.
.
.It Sy zfs_unlink_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When enabled, files will not be asynchronously removed from the list of pending
unlinks and the space they consume will be leaked.
Once this option has been disabled and the dataset is remounted,
the pending unlinks will be processed and the freed space returned to the pool.
This option is used by the test suite.
.
.It Sy zfs_delete_blocks Ns = Ns Sy 20480 Pq ulong
This is used to define a large file for the purposes of deletion.
Files containing more than
.Sy zfs_delete_blocks
blocks will be deleted asynchronously, while smaller files are deleted
synchronously.
Decreasing this value will reduce the time spent in an
.Xr unlink 2
system call, at the expense of a longer delay before the freed space is
available.
This only applies on Linux.
.
.It Sy zfs_dirty_data_max Ns = Pq int
Determines the dirty space limit in bytes.
Once this limit is exceeded, new writes are halted until space frees up.
This parameter takes precedence over
.Sy zfs_dirty_data_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/10 ,
capped at
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_max_max Ns = Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
This parameter takes precedence over
.Sy zfs_dirty_data_max_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy min(physical_ram/4, 4GiB) ,
or
.Sy min(physical_ram/4, 1GiB)
for 32-bit systems.
.
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq uint
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed as a percentage of physical RAM.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
The parameter
.Sy zfs_dirty_data_max_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq uint
Determines the dirty space limit, expressed as a percentage of all memory.
Once this limit is exceeded, new writes are halted until space frees up.
The parameter
.Sy zfs_dirty_data_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Subject to
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq uint
Start syncing out a transaction group if there's at least this much dirty data
.Pq as a percentage of Sy zfs_dirty_data_max .
This should be less than
.Sy zfs_vdev_async_write_active_min_dirty_percent .
.
.It Sy zfs_wrlog_data_max Ns = Pq int
The upper limit of write-transaction ZIL log data size in bytes.
Write operations are throttled when approaching the limit until log data is
cleared out after transaction group sync.
Because of some overhead, it should be set to at least twice the size of
.Sy zfs_dirty_data_max
.No to prevent harming normal write throughput .
It also should be smaller than the size of the slog device if slog is present.
.Pp
Defaults to
.Sy zfs_dirty_data_max*2 .
.
.It Sy zfs_fallocate_reserve_percent Ns = Ns Sy 110 Ns % Pq uint
Since ZFS is a copy-on-write filesystem with snapshots, blocks cannot be
preallocated for a file in order to guarantee that later writes will not
run out of space.
Instead,
.Xr fallocate 2
space preallocation only checks that sufficient space is currently available
in the pool or the user's project quota allocation,
and then creates a sparse file of the requested size.
The requested space is multiplied by
.Sy zfs_fallocate_reserve_percent
to allow additional space for indirect blocks and other internal metadata.
Setting this to
.Sy 0
disables support for
.Xr fallocate 2
and causes it to return
.Sy EOPNOTSUPP .
.
.It Sy zfs_fletcher_4_impl Ns = Ns Sy fastest Pq string
Select a fletcher 4 implementation.
.Pp
Supported selectors are:
.Sy fastest , scalar , sse2 , ssse3 , avx2 , avx512f , avx512bw ,
.No and Sy aarch64_neon .
All except
.Sy fastest No and Sy scalar
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of fletcher 4 are available, the
.Sy fastest
will be chosen using a micro benchmark.
Selecting
.Sy scalar
results in the original CPU-based calculation being used.
Selecting any option other than
.Sy fastest No or Sy scalar
results in vector instructions
from the respective CPU instruction set being used.
.
.It Sy zfs_blake3_impl Ns = Ns Sy fastest Pq string
Select a BLAKE3 implementation.
.Pp
Supported selectors are:
.Sy cycle , fastest , generic , sse2 , sse41 , avx2 , avx512 .
All except
.Sy cycle , fastest No and Sy generic
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of BLAKE3 are available, the
.Sy fastest
will be chosen using a micro benchmark.
You can see the benchmark results by reading this kstat file:
.Pa /proc/spl/kstat/zfs/chksum_bench .
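.Pp
For example:
.Bd -literal -compact
# cat /proc/spl/kstat/zfs/chksum_bench
.Ed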
.
.It Sy zfs_free_bpobj_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable the processing of the free_bpobj object.
.
.It Sy zfs_async_block_max_blocks Ns = Ns Sy UINT64_MAX Po unlimited Pc Pq u64
Maximum number of blocks freed in a single TXG.
.
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq u64
Maximum number of dedup blocks freed in a single TXG.
.
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq uint
Maximum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq uint
Minimum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
When the pool has more than this much dirty data, use
.Sy zfs_vdev_async_write_max_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq uint
When the pool has less than this much dirty data, use
.Sy zfs_vdev_async_write_min_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly
interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 10 Pq uint
Maximum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq uint
Minimum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.Pp
Lower values are associated with better latency on rotational media but poorer
resilver performance.
The default value of
.Sy 2
was chosen as a compromise.
A value of
.Sy 3
has been shown to improve resilver performance further at a cost of
further increasing latency.
.
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq uint
Maximum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq uint
Minimum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq uint
The maximum number of I/O operations active to each device.
Ideally, this will be at least the sum of each queue's
.Sy max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_open_timeout_ms Ns = Ns Sy 1000 Pq uint
Timeout value to wait before determining a device is missing
during import.
This is helpful for transient missing paths due
to links being briefly removed and recreated in response to
udev events.
.
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq uint
Maximum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq uint
Minimum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq uint
Maximum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq uint
Minimum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq uint
Maximum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq uint
Minimum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq uint
Maximum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq uint
Minimum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq uint
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
the number of concurrently-active I/O operations is limited to
.Sy zfs_*_min_active ,
unless the vdev is "idle".
When there are no interactive I/O operations active (synchronous or otherwise),
and
.Sy zfs_vdev_nia_delay
operations have completed since the last interactive operation,
then the vdev is considered to be "idle",
and the number of concurrently-active non-interactive operations is increased to
.Sy zfs_*_max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq uint
Some HDDs tend to prioritize sequential I/O so strongly, that concurrent
random I/O latency reaches several seconds.
On some HDDs this happens even if sequential I/O operations
are submitted one at a time, and so setting
.Sy zfs_*_max_active Ns = Sy 1
does not help.
To prevent non-interactive I/O, like scrub,
from monopolizing the device, no more than
.Sy zfs_vdev_nia_credit No operations can be sent
while there are outstanding incomplete interactive operations.
This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq uint
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
.Pp
Also see
.Sy zio_dva_throttle_enabled .
.
.It Sy zfs_vdev_def_queue_depth Ns = Ns Sy 32 Pq uint
Default queue depth for each vdev I/O allocator.
Higher values allow for better coalescing of sequential writes before sending
them to the disk, but can increase transaction commit times.
.
.It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint
Defines whether the driver should retire on a given error type.
The following options may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1	Device	No driver retries on device errors.
2 Transport No driver retries on transport errors.
4 Driver No driver retries on driver errors.
.TE
.
.It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int
Time before expiring
.Pa .zfs/snapshot .
.
.It Sy zfs_admin_snapshot Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow the creation, removal, or renaming of entries in the
.Sy .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled, this functionality works both locally and over NFS exports
which have the
.Em no_root_squash
option set.
.
.It Sy zfs_flags Ns = Ns Sy 0 Pq int
Set additional debugging flags.
The following flags may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 ZFS_DEBUG_DPRINTF Enable dprintf entries in the debug log.
* 2 ZFS_DEBUG_DBUF_VERIFY Enable extra dbuf verifications.
* 4 ZFS_DEBUG_DNODE_VERIFY Enable extra dnode verifications.
8 ZFS_DEBUG_SNAPNAMES Enable snapshot name verification.
* 16 ZFS_DEBUG_MODIFY Check for illegally modified ARC buffers.
64 ZFS_DEBUG_ZIO_FREE Enable verification of block frees.
128 ZFS_DEBUG_HISTOGRAM_VERIFY Enable extra spacemap histogram verifications.
256 ZFS_DEBUG_METASLAB_VERIFY Verify space accounting on disk matches in-memory \fBrange_trees\fP.
512 ZFS_DEBUG_SET_ERROR Enable \fBSET_ERROR\fP and dprintf entries in the debug log.
1024 ZFS_DEBUG_INDIRECT_REMAP Verify split blocks created by device removal.
2048 ZFS_DEBUG_TRIM Verify TRIM ranges are always within the allocatable range tree.
4096 ZFS_DEBUG_LOG_SPACEMAP Verify that the log summary is consistent with the spacemap log
and enable \fBzfs_dbgmsgs\fP for metaslab loading and flushing.
.TE
.Sy \& * No Requires debug build .
.
.It Sy zfs_btree_verify_intensity Ns = Ns Sy 0 Pq uint
Enables btree verification.
The following settings are cumulative:
.TS
box;
lbz r l l .
Value Description
1 Verify height.
2 Verify pointers from children to parent.
3 Verify element counts.
4 Verify element order. (expensive)
* 5 Verify unused memory is poisoned. (expensive)
.TE
.Sy \& * No Requires debug build .
.
.It Sy zfs_free_leak_on_eio Ns = Ns Sy 0 Ns | Ns 1 Pq int
If destroy encounters an
.Sy EIO
while reading metadata (e.g. indirect blocks),
space referenced by the missing metadata cannot be freed.
Normally this causes the background destroy to become "stalled",
as it is unable to make forward progress.
While in this stalled state, all remaining space to free
from the error-encountering filesystem is "temporarily leaked".
Set this flag to cause it to ignore the
.Sy EIO ,
permanently leak the space from indirect blocks that cannot be read,
and continue to free everything else that it can.
.Pp
The default "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/O operations fail), and then later recovers.
In this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks.
Note, however, that this case is actually fairly rare.
.Pp
Typically pools either
.Bl -enum -compact -offset 4n -width "1."
.It
fail completely (but perhaps temporarily,
e.g. due to a top-level vdev going offline), or
.It
have localized, permanent errors (e.g. disk returns the wrong data
due to bit flip or firmware bug).
.El
In the former case, this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless.
In the latter, because the error is permanent, the best we can do
is leak the minimum amount of space,
which is what setting this flag will do.
It is therefore reasonable for this flag to normally be set,
but we chose the more conservative approach of not setting it,
so that there is no possibility of
leaking space in the "partial temporary" failure case.
.
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq uint
During a
.Nm zfs Cm destroy
operation using the
.Sy async_destroy
feature,
a minimum of this much time will be spent working on freeing blocks per TXG.
.
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq uint
Similar to
.Sy zfs_free_min_time_ms ,
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq s64
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq u64
Pattern written to vdev free space by
.Xr zpool-initialize 8 .
.
.It Sy zfs_initialize_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Size of writes used by
.Xr zpool-initialize 8 .
This option is used by the test suite.
.
.It Sy zfs_livelist_max_entries Ns = Ns Sy 500000 Po 5*10^5 Pc Pq u64
The threshold size (in block pointers) at which we create a new sub-livelist.
Larger sublists are more costly from a memory perspective but the fewer
sublists there are, the lower the cost of insertion.
.
.It Sy zfs_livelist_min_percent_shared Ns = Ns Sy 75 Ns % Pq int
If the amount of shared space between a snapshot and its clone drops below
this threshold, the clone turns off the livelist and reverts to the old
deletion method.
This is in place because livelists no longer give us a benefit
once a clone has been overwritten enough.
.
.It Sy zfs_livelist_condense_new_alloc Ns = Ns Sy 0 Pq int
Incremented each time an extra ALLOC blkptr is added to a livelist entry while
it is being condensed.
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_sync .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the synctask \(em
.Fn spa_livelist_condense_sync .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_livelist_condense_zthr_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_zthr_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the open context condensing work in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_lua_max_instrlimit Ns = Ns Sy 100000000 Po 10^8 Pc Pq u64
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.
.It Sy zfs_lua_max_memlimit Ns = Ns Sy 104857600 Po 100 MiB Pc Pq u64
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.
.It Sy zfs_max_dataset_nesting Ns = Ns Sy 50 Pq int
The maximum depth of nested datasets.
This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.
.It Sy zfs_max_log_walking Ns = Ns Sy 5 Pq u64
The number of past TXGs that the flushing algorithm of the log spacemap
feature uses to estimate incoming log blocks.
.
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq u64
Maximum number of rows allowed in the summary of the spacemap log.
.
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq uint
We currently support block sizes from
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
The benefits of larger blocks, and thus larger I/O,
need to be weighed against the cost of COWing a giant block to modify one byte.
Additionally, very large blocks can have an impact on I/O latency,
and also potentially on the memory allocator.
Therefore, we formerly forbade creating blocks larger than 1 MiB.
Larger blocks could be created by changing it,
and pools with larger blocks can always be imported and used,
regardless of this setting.
.
.It Sy zfs_allow_redacted_dataset_mount Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow datasets received with redacted send/receive to be mounted.
Normally disabled because these datasets may be missing key data.
.
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq u64
Minimum number of metaslabs to flush per dirty TXG.
.
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq uint
Allow metaslabs to keep their active state as long as their fragmentation
percentage is no more than this value.
An active metaslab that exceeds this threshold
will no longer keep its active status allowing better metaslabs to be selected.
.
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq uint
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value.
If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq uint
Defines a threshold at which metaslab groups should be eligible for allocations.
The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold.
Once all groups have reached the threshold, all groups are allowed to accept
allocations.
The default value of
.Sy 0
disables the feature and causes all metaslab groups to be eligible for
allocations.
.Pp
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old
.Sy zfs_mg_alloc_failures
facility.
.
.It Sy zfs_ddt_data_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place DDT data into the special allocation class.
.
.It Sy zfs_user_indirect_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place user data indirect blocks
into the special allocation class.
.
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest multihost updates will be available
in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
.
.It Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq u64
Used to control the frequency of multihost writes which are performed when the
.Sy multihost
pool property is on.
This is one of the factors used to determine the
length of the activity check during import.
.Pp
The multihost write period is
.Sy zfs_multihost_interval No / Sy leaf-vdevs .
On average a multihost write will be issued for each leaf vdev
every
.Sy zfs_multihost_interval
milliseconds.
In practice, the observed period can vary with the I/O load
and this observed value is the delay which is stored in the uberblock.
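.Pp
For example, with the default interval of 1000 ms, a pool with 10 leaf vdevs
issues a multihost write on average every 100 ms, touching each leaf vdev
about once per second.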
.
.It Sy zfs_multihost_import_intervals Ns = Ns Sy 20 Pq uint
Used to control the duration of the activity test on import.
Smaller values of
.Sy zfs_multihost_import_intervals
will reduce the import time but increase
the risk of failing to detect an active pool.
The total activity check time is never allowed to drop below one second.
.Pp
On import the activity check waits a minimum amount of time determined by
.Sy zfs_multihost_interval No \(mu Sy zfs_multihost_import_intervals ,
or the same product computed on the host which last had the pool imported,
whichever is greater.
The activity check time may be further extended if the value of MMP
delay found in the best uberblock indicates actual multihost updates happened
at longer intervals than
.Sy zfs_multihost_interval .
A minimum of
.Em 100 ms
is enforced.
.Pp
.Sy 0 No is equivalent to Sy 1 .
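.Pp
For example, with the defaults the activity check waits at least
.Sy 1000 ms No \(mu Sy 20 No = 20 s
before the import can proceed.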
.
.It Sy zfs_multihost_fail_intervals Ns = Ns Sy 10 Pq uint
Controls the behavior of the pool when multihost write failures or delays are
detected.
.Pp
When
.Sy 0 ,
multihost write failures or delays are ignored.
The failures will still be reported to the ZED, which, depending on its
configuration, may take action such as suspending the pool or offlining a
device.
.Pp
Otherwise, the pool will be suspended if
.Sy zfs_multihost_fail_intervals No \(mu Sy zfs_multihost_interval
milliseconds pass without a successful MMP write.
This guarantees the activity test will see MMP writes if the pool is imported.
.Sy 1 No is equivalent to Sy 2 ;
this is necessary to prevent the pool from being suspended
due to normal, small I/O latency variations.
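.Pp
For example, with the defaults the pool is suspended after
.Sy 10 No \(mu Sy 1000 ms No = 10 s
without a successful MMP write.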
.
.It Sy zfs_no_scrub_io Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable scrub I/O.
This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.
.It Sy zfs_no_scrub_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable block prefetching for scrubs.
.
.It Sy zfs_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable cache flush operations on disks when writing.
Setting this will cause pool corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zfs_nopwrite_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Allow no-operation writes.
The occurrence of nopwrites will further depend on other pool properties
.Pq i.a. the checksumming and compression algorithms .
.
.It Sy zfs_dmu_offset_next_sync Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable forcing TXG sync to find holes.
When enabled, forces ZFS to sync data when
.Sy SEEK_HOLE No or Sy SEEK_DATA
flags are used, allowing holes in a file to be accurately reported.
When disabled, holes will not be reported in recently dirtied files.
.
.It Sy zfs_pd_bytes_max Ns = Ns Sy 52428800 Ns B Po 50 MiB Pc Pq int
The number of bytes which should be prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq uint
The number of blocks pointed to by an indirect (non-L0) block which should be
prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_per_txg_dirty_frees_percent Ns = Ns Sy 30 Ns % Pq u64
Control percentage of dirtied indirect blocks from frees allowed into one TXG.
After this threshold is crossed, additional frees will wait until the next TXG.
.Sy 0 No disables this throttle .
.
.It Sy zfs_prefetch_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable predictive prefetch.
Note that it leaves "prescient" prefetch
.Pq for, e.g., Nm zfs Cm send
intact.
Unlike predictive prefetch, prescient prefetch never issues I/O
that ends up not being needed, so it can't hurt performance.
.
.It Sy zfs_qat_checksum_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for SHA256 checksums.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_compress_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for gzip compression.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_encrypt_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for AES-GCM encryption.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Bytes to read per chunk.
.
.It Sy zfs_read_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest reads will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
.
.It Sy zfs_read_history_hits Ns = Ns Sy 0 Ns | Ns 1 Pq int
Include cache hits in read history.
.
.It Sy zfs_rebuild_max_segment Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Maximum read segment size to issue when sequentially resilvering a
top-level vdev.
.
.It Sy zfs_rebuild_scrub_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub when the last active sequential resilver
completes in order to verify the checksums of all blocks which have been
resilvered.
This is enabled by default and strongly recommended.
.
.It Sy zfs_rebuild_vdev_limit Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq u64
Maximum amount of I/O that can be concurrently issued for a sequential
resilver per leaf device, given in bytes.
.
.It Sy zfs_reconstruct_indirect_combinations_max Ns = Ns Sy 4096 Pq int
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all.
Instead, try at most this many randomly selected
combinations each time the block is accessed.
This allows all segment copies to participate fairly
in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.
.It Sy zfs_recover Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to attempt to recover from fatal errors.
This should only be used as a last resort,
as it typically results in leaked space, or worse.
.
.It Sy zfs_removal_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore hard I/O errors during device removal.
When set, if a device encounters a hard I/O error during the removal process
the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is hence not recommended.
This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
This is used by the test suite so that it can ensure that certain actions
happen while in the middle of a removal.
.
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The largest contiguous segment that we will attempt to allocate when removing
a device.
If there is a performance problem with attempting to allocate large blocks,
consider decreasing this.
The default value is also the maximum.
.
.It Sy zfs_resilver_disable_defer Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore the
.Sy resilver_defer
feature, causing an operation that would start a resilver to
immediately restart the one in progress.
.
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq uint
Resilvers are processed by the sync thread.
While resilvering, it will spend at least this much time
working on a resilver between TXG flushes.
.
.It Sy zfs_scan_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
If set, remove the DTL (dirty time list) upon completion of a pool scan (scrub),
even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq uint
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
working on a scrub between TXG flushes.
.
.It Sy zfs_scrub_error_blocks_per_txg Ns = Ns Sy 4096 Pq uint
Error blocks to be scrubbed in one TXG.
.
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hour Pc Pq uint
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/O to disk.
The frequency of this flushing is determined by this tunable.
.
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq uint
This tunable affects how scrub and resilver I/O segments are ordered.
A higher number indicates that we care more about how filled in a segment is,
while a lower number indicates we care more about the size of the extent without
considering the gaps within a segment.
This value is only tunable upon module insertion.
Changing the value afterwards will have no effect on scrub or resilver
performance.
.
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq uint
Determines the order that data will be verified while scrubbing or resilvering:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
Data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing
.Pq see Sy zfs_scan_mem_lim_fact .
This may improve scrub performance if the pool's data is very fragmented.
.It Sy 2
The largest mostly-contiguous chunk of found data will be verified first.
By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size.
.It Sy 0
.No Use strategy Sy 1 No during normal verification
.No and strategy Sy 2 No while taking a checkpoint .
.El
.
.It Sy zfs_scan_legacy Ns = Ns Sy 0 Ns | Ns 1 Pq int
If unset, indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O.
Otherwise indicates that the legacy algorithm will be used,
where I/O is initiated as soon as it is discovered.
Unsetting will not affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_max_ext_gap Ns = Ns Sy 2097152 Ns B Po 2 MiB Pc Pq int
Sets the largest gap in bytes between scrub/resilver I/O operations
that will still be considered sequential for sorting purposes.
Changing this value will not
affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O.
This is done until we get below the soft limit.
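.Pp
For example, the default of
.Sy 20 Ns ^-1
caps I/O sorting memory at 1/20th
.Pq 5%
of RAM, i.e. 0.8 GiB on a system with 16 GiB.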
.
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm.
When we cross this limit from below no action is taken.
When we cross this limit from above it is because we are issuing verification
I/O.
In this case (unless the metadata scan is done) we stop issuing verification I/O
and start scanning metadata again until we get to the hard limit.
.
.It Sy zfs_scan_report_txgs Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When reporting resilver throughput and estimated completion time use the
performance observed over roughly the last
.Sy zfs_scan_report_txgs
TXGs.
When set to zero, performance is calculated over the time between checkpoints.
.
.It Sy zfs_scan_strict_mem_lim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enforce tight memory limits on pool scans when a sequential scan is in progress.
When disabled, the memory limit may be exceeded by fast disks.
.
.It Sy zfs_scan_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
Freezes a scrub/resilver in progress without actually pausing it.
Intended for testing/debugging.
.
.It Sy zfs_scan_vdev_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum amount of data that can be concurrently issued at once for scrubs and
resilvers per leaf device, given in bytes.
.
.It Sy zfs_send_corrupt_data Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow sending of corrupt data (ignore read/checksum errors when sending).
.
.It Sy zfs_send_unmodified_spill_blocks Ns = Ns Sy 1 Ns | Ns 0 Pq int
Include unmodified spill blocks in the send stream.
Under certain circumstances, previous versions of ZFS could incorrectly
remove the spill block from an existing object.
Including unmodified copies of the spill blocks creates a backwards-compatible
stream which will recreate a spill block if it was incorrectly removed.
.
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
internal queues.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum number of bytes allowed in
.Nm zfs Cm send Ns 's
internal queues.
.
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
prefetch queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed that will be prefetched by
.Nm zfs Cm send .
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm receive
queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed in the
.Nm zfs Cm receive
queue.
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum amount of data, in bytes, that
.Nm zfs Cm receive
will write in one DMU transaction.
This is the uncompressed size, even when receiving a compressed send stream.
This setting will not reduce the write size below a single block.
Capped at a maximum of
.Sy 32 MiB .
.
.It Sy zfs_recv_best_effort_corrective Ns = Ns Sy 0 Pq int
When this variable is set to non-zero, a corrective receive:
.Bl -enum -compact -offset 4n -width "1."
.It
Does not enforce the restriction of source & destination snapshot GUIDs
matching.
.It
If there is an error during healing, the healing receive is not
terminated; instead, it moves on to the next record.
.El
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Setting this variable overrides the default logic for estimating block
sizes when doing a
.Nm zfs Cm send .
The default heuristic is that the average block size
will be the current recordsize.
Override this value if most data in your dataset is not of that size
and you require accurate zfs send size estimates.
.
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq uint
Flushing of data to disk is done in passes.
Defer frees starting in this pass.
.
.It Sy zfs_spa_discard_memory_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum memory used for prefetching a checkpoint's space map on each
vdev while discarding the checkpoint.
.
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq uint
Only allow small data blocks to be allocated on the special and dedup vdev
types when the available free space percentage on these vdevs exceeds this
value.
This ensures reserved space is available for pool metadata as the
special vdevs approach capacity.
.
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq uint
Starting in this sync pass, disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes,
so this has no effect.
.Pp
The original intent was that disabling compression would help the sync passes
to converge.
However, in practice, disabling compression increases
the average number of sync passes; because when we turn compression off,
many blocks' size will change, and thus we have to re-allocate
(not overwrite) them.
It also increases the number of
.Em 128 KiB
allocations (e.g. for indirect blocks and spacemaps)
because these will not be compressed.
The
.Em 128 KiB
allocations are especially detrimental to performance
on highly fragmented systems, which may have very few free segments of this
size,
and may need to load new metaslabs to satisfy these allocations.
.
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq uint
Rewrite new block pointers starting in this pass.
.
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
This controls the number of threads used by
.Sy dp_sync_taskq .
The default value of
.Sy 75%
will create a maximum of one thread per CPU.
.
.It Sy zfs_trim_extent_bytes_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Maximum size of TRIM command.
Larger ranges will be split into chunks no larger than this value before
issuing.
.
.It Sy zfs_trim_extent_bytes_min Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Minimum size of TRIM commands.
TRIM ranges smaller than this will be skipped,
unless they're part of a larger range which was chunked.
This is done because it's common for these small TRIMs
to negatively impact overall performance.
.
.It Sy zfs_trim_metaslab_skip Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Skip uninitialized metaslabs during the TRIM process.
This option is useful for pools constructed from large thinly-provisioned
devices
where TRIM operations are slow.
As a pool ages, an increasing fraction of the pool's metaslabs
will be initialized, progressively degrading the usefulness of this option.
This setting is stored when starting a manual TRIM and will
persist for the duration of the requested TRIM.
.
.It Sy zfs_trim_queue_limit Ns = Ns Sy 10 Pq uint
Maximum number of queued TRIMs outstanding per leaf vdev.
The number of concurrent TRIM commands issued to the device is controlled by
.Sy zfs_vdev_trim_min_active No and Sy zfs_vdev_trim_max_active .
.
.It Sy zfs_trim_txg_batch Ns = Ns Sy 32 Pq uint
The number of transaction groups' worth of frees which should be aggregated
before TRIM operations are issued to the device.
This setting represents a trade-off between issuing larger,
more efficient TRIM operations and the delay
before the recently trimmed space is available for use by the device.
.Pp
Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory
usage.
Decreasing this value will have the opposite effect.
The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq uint
Flush dirty data to disk at least every this many seconds (maximum TXG
duration).
.
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
Max vdev I/O aggregation size.
.
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
Max vdev I/O aggregation size for non-rotating media.
.
.It Sy zfs_vdev_mirror_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
immediately follows its predecessor on rotational vdevs.
.
.It Sy zfs_vdev_mirror_rotating_seek_inc Ns = Ns Sy 5 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that do not immediately follow the previous
operation are incremented by half.
.
.It Sy zfs_vdev_mirror_rotating_seek_offset Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum distance for the last queued I/O operation in which
the balancing algorithm considers an operation to have locality.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_mirror_non_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/O operations do not immediately follow one another.
.
.It Sy zfs_vdev_mirror_non_rotating_seek_inc Ns = Ns Sy 1 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that do not immediately follow the previous
operation are incremented by half.
.
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Aggregate read I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq uint
Aggregate write I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_raidz_impl Ns = Ns Sy fastest Pq string
Select the raidz parity implementation to use.
.Pp
Variants that don't depend on CPU-specific features
may be selected on module load, as they are supported on all systems.
The remaining options may only be set after the module is loaded,
as they are available only if the implementations are compiled in
and supported on the running system.
.Pp
Once the module is loaded,
.Pa /sys/module/zfs/parameters/zfs_vdev_raidz_impl
will show the available options,
with the currently selected one enclosed in square brackets.
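.Pp
For example, on Linux
.Pq the option list shown is illustrative and varies with CPU and build :
.Bd -literal -compact
# cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
[fastest] original scalar sse2 ssse3 avx2
# echo avx2 > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
.Ed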
.Pp
.TS
lb l l .
fastest selected by built-in benchmark
original original implementation
scalar scalar implementation
sse2 SSE2 instruction set 64-bit x86
ssse3 SSSE3 instruction set 64-bit x86
avx2 AVX2 instruction set 64-bit x86
avx512f AVX512F instruction set 64-bit x86
avx512bw AVX512F & AVX512BW instruction sets 64-bit x86
aarch64_neon NEON Aarch64/64-bit ARMv8
aarch64_neonx2 NEON with more unrolling Aarch64/64-bit ARMv8
powerpc_altivec Altivec PowerPC
.TE
.
.It Sy zfs_vdev_scheduler Pq charp
.Sy DEPRECATED .
Prints warning to kernel log for compatibility.
.
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq uint
Max event queue length.
Events in the queue can be viewed with
.Xr zpool-events 8 .
.
.It Sy zfs_zevent_retain_max Ns = Ns Sy 2000 Pq int
Maximum recent zevent records to retain for duplicate checking.
Setting this to
.Sy 0
disables duplicate detection.
.
.It Sy zfs_zevent_retain_expire_secs Ns = Ns Sy 900 Ns s Po 15 min Pc Pq int
Lifespan for a recent ereport that was retained for duplicate checking.
.
.It Sy zfs_zil_clean_taskq_maxalloc Ns = Ns Sy 1048576 Pq int
The maximum number of taskq entries that are allowed to be cached.
When this limit is exceeded transaction records (itxs)
will be cleaned synchronously.
.
.It Sy zfs_zil_clean_taskq_minalloc Ns = Ns Sy 1024 Pq int
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.
.It Sy zfs_zil_clean_taskq_nthr_pct Ns = Ns Sy 100 Ns % Pq int
This controls the number of threads used by
.Sy dp_zil_clean_taskq .
The default value of
.Sy 100%
will create a maximum of one thread per cpu.
.
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
This sets the maximum block size used by the ZIL.
On very fragmented pools, lowering this
.Pq typically to Sy 36 KiB
can improve performance.
.
.It Sy zil_min_commit_timeout Ns = Ns Sy 5000 Pq u64
This sets the minimum delay, in nanoseconds, for which the ZIL will delay a
block commit while waiting for more records.
If ZIL writes are too fast, the kernel may be unable to sleep for such a
short interval, increasing log latency above what is allowed by
.Sy zfs_commit_timeout_pct .
.
.It Sy zil_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable the cache flush commands that are normally sent to disk by
the ZIL after an LWB write has completed.
Setting this will cause ZIL corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zil_replay_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable intent logging replay.
Can be disabled for recovery from corrupted ZIL.
.
.It Sy zil_slog_bulk Ns = Ns Sy 786432 Ns B Po 768 KiB Pc Pq u64
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
.
.It Sy zfs_zil_saxattr Ns = Ns Sy 1 Ns | Ns 0 Pq int
Setting this tunable to zero disables ZIL logging of new
.Sy xattr Ns = Ns Sy sa
records if the
.Sy org.openzfs:zilsaxattr
feature is enabled on the pool.
This would only be necessary to work around bugs in the ZIL logging or replay
code for this record type.
The tunable has no effect if the feature is disabled.
.
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq uint
Usually, one metaslab from each normal-class vdev is dedicated for use by
the ZIL to log synchronous writes.
However, if there are fewer than
.Sy zfs_embedded_slog_min_ms
metaslabs in the vdev, this functionality is disabled.
This ensures that we don't set aside an unreasonable amount of space for the
ZIL.
.
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq uint
Enables a heuristic that uses LZ4 and zstd-1 passes to detect incompressible
data for zstd compression levels >= 3.
.
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq uint
Minimal uncompressed size (inclusive) of a record before the early abort
heuristic will be attempted.
.
.It Sy zio_deadman_log_all Ns = Ns Sy 0 Ns | Ns 1 Pq int
If non-zero, the zio deadman will produce debugging messages
.Pq see Sy zfs_dbgmsg_enable
for all zios, rather than only for leaf zios possessing a vdev.
This is meant to be used by developers to gain
diagnostic information for hang conditions which don't involve a mutex
or other locking primitive: typically conditions in which a thread in
the zio pipeline is looping indefinitely.
.
.It Sy zio_slow_io_ms Ns = Ns Sy 30000 Ns ms Po 30 s Pc Pq int
When an I/O operation takes more than this much time to complete,
it's marked as slow.
Each slow operation causes a delay zevent.
Slow I/O counters can be seen with
.Nm zpool Cm status Fl s .
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
.
.It Sy zfs_xattr_compat Ns = Ns 0 Ns | Ns 1 Pq int
Control the naming scheme used when setting new xattrs in the user namespace.
If
.Sy 0
.Pq the default on Linux ,
user namespace xattr names are prefixed with the namespace, to be backwards
compatible with previous versions of ZFS on Linux.
If
.Sy 1
.Pq the default on Fx ,
user namespace xattr names are not prefixed, to be backwards compatible with
previous versions of ZFS on illumos and
.Fx .
.Pp
Either naming scheme can be read on this and future versions of ZFS, regardless
of this tunable, but legacy ZFS on illumos or
.Fx
are unable to read user namespace xattrs written in the Linux format, and
legacy versions of ZFS on Linux are unable to read user namespace xattrs written
in the legacy ZFS format.
.Pp
An existing xattr with the alternate naming scheme is removed when overwriting
the xattr so as to not accumulate duplicates.
.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression and
checksum calculations.
A fractional number of CPUs is rounded down.
.Pp
The default value of
.Sy 80%
was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance,
especially when slower compression and/or checksumming is enabled.
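.Pp
For example, on a system with 6 online CPUs the default of
.Sy 80%
yields 4.8 workers, which is rounded down to 4.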
.
.It Sy zio_taskq_batch_tpq Ns = Ns Sy 0 Pq uint
Number of worker threads per taskq.
Lower values improve I/O ordering and CPU utilization,
while higher values reduce lock contention.
.Pp
If
.Sy 0 ,
generate a system-dependent value close to 6 threads per taskq.
.
.It Sy zvol_inhibit_dev Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Do not create zvol device nodes.
This may slightly improve startup time on
systems with a very large number of zvols.
.
.It Sy zvol_major Ns = Ns Sy 230 Pq uint
Major number for zvol block devices.
.
.It Sy zvol_max_discard_blocks Ns = Ns Sy 16384 Pq long
Discard (TRIM) operations done on zvols will be done in batches of this
many blocks, where block size is determined by the
.Sy volblocksize
property of a zvol.
.
.It Sy zvol_prefetch_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
When adding a zvol to the system, prefetch this many bytes
from the start and end of the volume.
Prefetching these regions of the volume is desirable,
because they are likely to be accessed immediately by
.Xr blkid 8
or the kernel partitioner.
.
.It Sy zvol_request_sync Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When processing I/O requests for a zvol, submit them synchronously.
This effectively limits the queue depth to
.Em 1
for each I/O submitter.
When unset, requests are handled asynchronously by a thread pool.
The number of requests which can be handled concurrently is controlled by
.Sy zvol_threads .
.Sy zvol_request_sync
is ignored when running on a kernel that supports block multiqueue
.Pq Li blk-mq .
.
.It Sy zvol_threads Ns = Ns Sy 0 Pq uint
The number of system-wide threads to use for processing zvol block I/Os.
If
.Sy 0
(the default) then internally set
.Sy zvol_threads
to the number of CPUs present or 32 (whichever is greater).
.
.It Sy zvol_blk_mq_threads Ns = Ns Sy 0 Pq uint
The number of threads per zvol to use for queuing I/O requests.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
If
.Sy 0
(the default) then internally set
.Sy zvol_blk_mq_threads
to the number of CPUs present.
.
.It Sy zvol_use_blk_mq Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Set to
.Sy 1
to use the
.Li blk-mq
API for zvols.
Set to
.Sy 0
(the default) to use the legacy zvol APIs.
This setting can give better or worse zvol performance depending on
the workload.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
.
.It Sy zvol_blk_mq_blocks_per_thread Ns = Ns Sy 8 Pq uint
If
.Sy zvol_use_blk_mq
is enabled, then process this number of
.Sy volblocksize Ns -sized blocks per zvol thread.
This tunable can be used to favor better performance for zvol reads (lower
values) or writes (higher values).
If set to
.Sy 0 ,
then the zvol layer will process the maximum number of blocks
per thread that it can.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
.
.It Sy zvol_blk_mq_queue_depth Ns = Ns Sy 0 Pq uint
The queue_depth value for the zvol
.Li blk-mq
interface.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
If
.Sy 0
(the default) then use the kernel's default queue depth.
Values are clamped to the kernel's
.Dv BLKDEV_MIN_RQ
and
.Dv BLKDEV_MAX_RQ Ns / Ns Dv BLKDEV_DEFAULT_RQ
limits.
.
.It Sy zvol_volmode Ns = Ns Sy 1 Pq uint
Defines the behaviour of zvol block devices when
.Sy volmode Ns = Ns Sy default :
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
.No equivalent to Sy full
.It Sy 2
.No equivalent to Sy dev
.It Sy 3
.No equivalent to Sy none
.El
.
.It Sy zvol_enforce_quotas Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Enable strict ZVOL quota enforcement.
The strict quota enforcement may have a performance impact.
.El
.
.Sh ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs in order to satisfy and complete I/O
requests.
The scheduler determines when and in what order those operations are issued.
The scheduler divides operations into five I/O classes,
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver.
Each queue defines the minimum and maximum number of concurrent operations
that may be issued to the device.
In addition, the device has an aggregate maximum,
.Sy zfs_vdev_max_active .
Note that the sum of the per-queue minima must not exceed the aggregate maximum.
If the sum of the per-queue maxima exceeds the aggregate maximum,
then the number of active operations may reach
.Sy zfs_vdev_max_active ,
in which case no further operations will be issued,
regardless of whether all per-queue minima have been met.
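.Pp
As a minimal sketch, the minima constraint can be checked against the live
module parameters, assuming the Linux
.Pa /sys/module/zfs/parameters
layout:
.Bd -literal -compact
# cd /sys/module/zfs/parameters
# awk '{sum += $1} END {print "sum of minima:", sum}' zfs_vdev_*_min_active
# cat zfs_vdev_max_active
.Ed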
.Pp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers.
Furthermore, physical devices typically have a limit
at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.Pp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied.
Once all are satisfied and the aggregate maximum has not been hit,
the scheduler looks for classes whose maximum has not been satisfied.
Iteration through the I/O classes is done in the order specified above.
No further operations are issued
if the aggregate maximum number of concurrent operations has been hit,
or if there are no operations queued for an I/O class that has not hit its
maximum.
Every time an I/O operation is queued or an operation completes,
the scheduler looks for new operations to issue.
.Pp
In general, smaller
.Sy max_active Ns s
will lead to lower latency of synchronous operations.
Larger
.Sy max_active Ns s
may lead to higher overall throughput, depending on underlying storage.
.Pp
The ratio of the queues'
.Sy max_active Ns s
determines the balance of performance between reads, writes, and scrubs.
For example, increasing
.Sy zfs_vdev_scrub_max_active
will cause the scrub or resilver to complete more quickly,
but reads and writes to have higher latency and lower throughput.
.Pp
All I/O classes have a fixed maximum number of outstanding operations,
except for the async write class.
Asynchronous writes represent the data that is committed to stable storage
during the syncing stage for transaction groups.
Transaction groups enter the syncing state periodically,
so the number of queued async writes will quickly burst up
and then bleed down to zero.
Rather than servicing them as quickly as possible,
the I/O scheduler changes the maximum number of active async write operations
according to the amount of dirty data in the pool.
Since both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of simultaneous operations also stabilizes the
response time of operations from other queues, in particular synchronous ones.
In broad strokes, the I/O scheduler will issue more concurrent operations
from the async write queue as there is more dirty data in the pool.
.
.Ss Async Writes
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points:
.Bd -literal
| o---------| <-- \fBzfs_vdev_async_write_max_active\fP
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- \fBzfs_vdev_async_write_min_active\fP
0|_______^______|_________|
0% | | 100% of \fBzfs_dirty_data_max\fP
| |
| `-- \fBzfs_vdev_async_write_active_max_dirty_percent\fP
`--------- \fBzfs_vdev_async_write_active_min_dirty_percent\fP
.Ed
.Pp
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum.
As that threshold is crossed, the number of concurrent operations issued
increases linearly to the maximum at the specified maximum percentage
of the dirty data allowed in the pool.
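.Pp
For example, with the default values above
.Pq minimum 2, maximum 10, thresholds 30% and 60% ,
a pool whose dirty data sits at 45% of
.Sy zfs_dirty_data_max
is halfway up the slope and is allowed
2 + (10 \- 2)/2 = 6 concurrent async write operations.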
.Pp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between
.Sy zfs_vdev_async_write_active_min_dirty_percent
and
.Sy zfs_vdev_async_write_active_max_dirty_percent .
If it exceeds the maximum percentage,
this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle.
In this case, we must further throttle incoming writes,
as described in the next section.
.
.Sh ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.Pp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting.
This way the calculated delay time
is independent of the number of threads concurrently executing transactions.
.Pp
If we are the only waiter, wait relative to when the transaction started,
rather than the current time.
This credits the transaction for "time already served",
e.g. reading indirect blocks.
.Pp
The minimum time for a transaction to take is calculated as
.D1 min_time = min( Ns Sy zfs_delay_scale No \(mu Po Sy dirty No \- Sy min Pc / Po Sy max No \- Sy dirty Pc , 100ms)
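.Pp
At the midpoint of the curve, where
.Sy dirty No \- Sy min No equals Sy max No \- Sy dirty ,
the fraction is 1 and the delay is simply
.Sy zfs_delay_scale
nanoseconds; with the default scale of 500,000 ns this gives the 500 us
midpoint shown in the figures below.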
.Pp
The delay has two degrees of freedom that can be adjusted via tunables.
The percentage of dirty data at which we start to delay is defined by
.Sy zfs_delay_min_dirty_percent .
This should typically be at or above
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
so that we only start to delay after writing at full speed
has failed to keep up with the incoming write rate.
The scale of the curve is defined by
.Sy zfs_delay_scale .
Roughly speaking, this variable determines the amount of delay at the midpoint
of the curve.
.Bd -literal
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| \fBzfs_delay_scale\fP ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note that, since the delay is added to the outstanding time remaining on the
most recent transaction, it is effectively the inverse of IOPS.
Here, the midpoint of
.Em 500 us
translates to
.Em 2000 IOPS .
The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first three quarters of the curve yield relatively small differences
in the amount of delay.
.Pp
The effects can be easier to understand when the amount of delay is
represented on a logarithmic scale:
.Bd -literal
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ \fBzfs_delay_scale\fP ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly.
The goal of a properly tuned system should be to keep the amount of dirty data
out of that range by first ensuring that the appropriate limits are set
for the I/O scheduler to reach optimal throughput on the back-end storage,
and then by changing the value of
.Sy zfs_delay_scale
to increase the steepness of the curve.
diff --git a/sys/contrib/openzfs/man/man7/zfsprops.7 b/sys/contrib/openzfs/man/man7/zfsprops.7
index 8f6b919cfc0b..51ddd85eb79e 100644
--- a/sys/contrib/openzfs/man/man7/zfsprops.7
+++ b/sys/contrib/openzfs/man/man7/zfsprops.7
@@ -1,2112 +1,2114 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2011, Pawel Jakub Dawidek <pjd@FreeBSD.org>
.\" Copyright (c) 2012, Glen Barber <gjb@FreeBSD.org>
.\" Copyright (c) 2012, Bryan Drewery <bdrewery@FreeBSD.org>
.\" Copyright (c) 2013, Steven Hartland <smh@FreeBSD.org>
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright (c) 2016 Nexenta Systems, Inc. All Rights Reserved.
.\" Copyright (c) 2014, Xin LI <delphij@FreeBSD.org>
.\" Copyright (c) 2014-2015, The FreeBSD Foundation, All Rights Reserved.
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\" Copyright (c) 2019, Kjeld Schouten-Lebbing
.\" Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
.\"
-.Dd April 18, 2023
+.Dd August 8, 2023
.Dt ZFSPROPS 7
.Os
.
.Sh NAME
.Nm zfsprops
.Nd native and user-defined properties of ZFS datasets
.
.Sh DESCRIPTION
Properties are divided into two types, native properties and user-defined
.Po or
.Qq user
.Pc
properties.
Native properties either export internal statistics or control ZFS behavior.
In addition, native properties are either editable or read-only.
User properties have no effect on ZFS behavior, but you can use them to annotate
datasets in a way that is meaningful in your environment.
For more information about user properties, see the
.Sx User Properties
section, below.
.
.Ss Native Properties
Every dataset has a set of properties that export statistics about the dataset
as well as control various behaviors.
Properties are inherited from the parent unless overridden by the child.
Some properties apply only to certain types of datasets
.Pq file systems, volumes, or snapshots .
.Pp
The values of numeric properties can be specified using human-readable suffixes
.Po for example,
.Sy k ,
.Sy KB ,
.Sy M ,
.Sy Gb ,
and so forth, up to
.Sy Z
for zettabyte
.Pc .
The following are all valid
.Pq and equal
specifications:
.Li 1536M ,
.Li 1.5g ,
.Li 1.50GB .
.Pp
The values of non-numeric properties are case sensitive and must be lowercase,
except for
.Sy mountpoint ,
.Sy sharenfs ,
and
.Sy sharesmb .
.Pp
The following native properties consist of read-only statistics about the
dataset.
These properties can be neither set, nor inherited.
Native properties apply to all dataset types unless otherwise noted.
.Bl -tag -width "usedbyrefreservation"
.It Sy available
The amount of space available to the dataset and all its children, assuming that
there is no other activity in the pool.
Because space is shared within a pool, availability can be limited by any number
of factors, including physical pool size, quotas, reservations, or other
datasets within the pool.
.Pp
This property can also be referred to by its shortened column name,
.Sy avail .
.It Sy compressratio
For non-snapshots, the compression ratio achieved for the
.Sy used
space of this dataset, expressed as a multiplier.
The
.Sy used
property includes descendant datasets, and, for clones, does not include the
space shared with the origin snapshot.
For snapshots, the
.Sy compressratio
is the same as the
.Sy refcompressratio
property.
Compression can be turned on by running:
.Nm zfs Cm set Sy compression Ns = Ns Sy on Ar dataset .
The default value is
.Sy off .
.It Sy createtxg
The transaction group (txg) in which the dataset was created.
Bookmarks have the same
.Sy createtxg
as the snapshot they are initially tied to.
This property is suitable for ordering a list of snapshots,
e.g. for incremental send and receive.
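.Pp
For example, the snapshots of a placeholder dataset
.Ar pool/fs
can be listed in creation order with:
.Dl # Nm zfs Cm list Fl r Fl t Ar snapshot Fl o Ar name,createtxg Fl s Ar createtxg Ar pool/fs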
.It Sy creation
The time this dataset was created.
.It Sy clones
For snapshots, this property is a comma-separated list of filesystems or volumes
which are clones of this snapshot.
The clones'
.Sy origin
property is this snapshot.
If the
.Sy clones
property is not empty, then this snapshot can not be destroyed
.Po even with the
.Fl r
or
.Fl f
options
.Pc .
The roles of origin and clone can be swapped by promoting the clone with the
.Nm zfs Cm promote
command.
.It Sy defer_destroy
This property is
.Sy on
if the snapshot has been marked for deferred destroy by using the
.Nm zfs Cm destroy Fl d
command.
Otherwise, the property is
.Sy off .
.It Sy encryptionroot
For encrypted datasets, indicates where the dataset is currently inheriting its
encryption key from.
Loading or unloading a key for the
.Sy encryptionroot
will implicitly load / unload the key for any inheriting datasets (see
.Nm zfs Cm load-key
and
.Nm zfs Cm unload-key
for details).
Clones will always share an
encryption key with their origin.
See the
.Sx Encryption
section of
.Xr zfs-load-key 8
for details.
.It Sy filesystem_count
The total number of filesystems and volumes that exist under this location in
the dataset tree.
This value is only available when a
.Sy filesystem_limit
has been set somewhere in the tree under which the dataset resides.
.It Sy keystatus
Indicates if an encryption key is currently loaded into ZFS.
The possible values are
.Sy none ,
.Sy available ,
and
.Sy unavailable .
See
.Nm zfs Cm load-key
and
.Nm zfs Cm unload-key .
.It Sy guid
The 64-bit GUID of this dataset or bookmark, which does not change over its
entire lifetime.
When a snapshot is sent to another pool, the received snapshot has the same
GUID.
Thus, the
.Sy guid
is suitable to identify a snapshot across pools.
.It Sy logicalreferenced
The amount of space that is
.Qq logically
accessible by this dataset.
See the
.Sy referenced
property.
The logical space ignores the effect of the
.Sy compression
and
.Sy copies
properties, giving a quantity closer to the amount of data that applications
see.
However, it does include space consumed by metadata.
.Pp
This property can also be referred to by its shortened column name,
.Sy lrefer .
.It Sy logicalused
The amount of space that is
.Qq logically
consumed by this dataset and all its descendents.
See the
.Sy used
property.
The logical space ignores the effect of the
.Sy compression
and
.Sy copies
properties, giving a quantity closer to the amount of data that applications
see.
However, it does include space consumed by metadata.
.Pp
This property can also be referred to by its shortened column name,
.Sy lused .
.It Sy mounted
For file systems, indicates whether the file system is currently mounted.
This property can be either
.Sy yes
or
.Sy no .
.It Sy objsetid
A unique identifier for this dataset within the pool.
Unlike the dataset's
.Sy guid , No the Sy objsetid
of a dataset is not transferred to other pools when the snapshot is copied
with a send/receive operation.
The
.Sy objsetid
can be reused (for a new dataset) after the dataset is deleted.
.It Sy origin
For cloned file systems or volumes, the snapshot from which the clone was
created.
See also the
.Sy clones
property.
.It Sy receive_resume_token
For filesystems or volumes which have saved partially-completed state from
.Nm zfs Cm receive Fl s ,
this opaque token can be provided to
.Nm zfs Cm send Fl t
to resume and complete the
.Nm zfs Cm receive .
.It Sy redact_snaps
For bookmarks, this is the list of snapshot guids the bookmark contains a
redaction
list for.
For snapshots, this is the list of snapshot guids the snapshot is redacted with
respect to.
.It Sy referenced
The amount of data that is accessible by this dataset, which may or may not be
shared with other datasets in the pool.
When a snapshot or clone is created, it initially references the same amount of
space as the file system or snapshot it was created from, since its contents are
identical.
.Pp
This property can also be referred to by its shortened column name,
.Sy refer .
.It Sy refcompressratio
The compression ratio achieved for the
.Sy referenced
space of this dataset, expressed as a multiplier.
See also the
.Sy compressratio
property.
.It Sy snapshot_count
The total number of snapshots that exist under this location in the dataset
tree.
This value is only available when a
.Sy snapshot_limit
has been set somewhere in the tree under which the dataset resides.
.It Sy type
The type of dataset:
.Sy filesystem ,
.Sy volume ,
.Sy snapshot ,
or
.Sy bookmark .
.It Sy used
The amount of space consumed by this dataset and all its descendents.
This is the value that is checked against this dataset's quota and reservation.
The space used does not include this dataset's reservation, but does take into
account the reservations of any descendent datasets.
The amount of space that a dataset consumes from its parent, as well as the
amount of space that is freed if this dataset is recursively destroyed, is the
greater of its space used and its reservation.
.Pp
The used space of a snapshot
.Po see the
.Sx Snapshots
section of
.Xr zfsconcepts 7
.Pc
is space that is referenced exclusively by this snapshot.
If this snapshot is destroyed, the amount of
.Sy used
space will be freed.
Space that is shared by multiple snapshots isn't accounted for in this metric.
When a snapshot is destroyed, space that was previously shared with this
snapshot can become unique to snapshots adjacent to it, thus changing the used
space of those snapshots.
The used space of the latest snapshot can also be affected by changes in the
file system.
Note that the
.Sy used
space of a snapshot is a subset of the
.Sy written
space of the snapshot.
.Pp
The amount of space used, available, or referenced does not take into account
pending changes.
Pending changes are generally accounted for within a few seconds.
Committing a change to a disk using
.Xr fsync 2
or
.Sy O_SYNC
does not necessarily guarantee that the space usage information is updated
immediately.
.It Sy usedby*
The
.Sy usedby*
properties decompose the
.Sy used
properties into the various reasons that space is used.
Specifically,
.Sy used No =
.Sy usedbychildren No +
.Sy usedbydataset No +
.Sy usedbyrefreservation No +
.Sy usedbysnapshots .
These properties are only available for datasets created on
.Nm zpool
.Qo version 13 Qc
pools.
.It Sy usedbychildren
The amount of space used by children of this dataset, which would be freed if
all the dataset's children were destroyed.
.It Sy usedbydataset
The amount of space used by this dataset itself, which would be freed if the
dataset were destroyed
.Po after first removing any
.Sy refreservation
and destroying any necessary snapshots or descendents
.Pc .
.It Sy usedbyrefreservation
The amount of space used by a
.Sy refreservation
set on this dataset, which would be freed if the
.Sy refreservation
was removed.
.It Sy usedbysnapshots
The amount of space consumed by snapshots of this dataset.
In particular, it is the amount of space that would be freed if all of this
dataset's snapshots were destroyed.
Note that this is not simply the sum of the snapshots'
.Sy used
properties because space can be shared by multiple snapshots.
.It Sy userused Ns @ Ns Ar user
The amount of space consumed by the specified user in this dataset.
Space is charged to the owner of each file, as displayed by
.Nm ls Fl l .
The amount of space charged is displayed by
.Nm du No and Nm ls Fl s .
See the
.Nm zfs Cm userspace
command for more information.
.Pp
Unprivileged users can access only their own space usage.
The root user, or a user who has been granted the
.Sy userused
privilege with
.Nm zfs Cm allow ,
can access everyone's usage.
.Pp
The
.Sy userused Ns @ Ns Ar …
properties are not displayed by
.Nm zfs Cm get Sy all .
The user's name must be appended after the
.Sy @
symbol, using one of the following forms:
.Bl -bullet -compact -offset 4n
.It
POSIX name
.Pq Qq joe
.It
POSIX numeric ID
.Pq Qq 789
.It
SID name
.Pq Qq joe.smith@mydomain
.It
SID numeric ID
.Pq Qq S-1-123-456-789
.El
.Pp
Files created on Linux always have POSIX owners.
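.Pp
For example, the space charged to a placeholder POSIX user
.Qq joe
can be read with:
.Dl # Nm zfs Cm get Sy userused@joe Ar dataset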
.It Sy userobjused Ns @ Ns Ar user
The
.Sy userobjused
property is similar to
.Sy userused
but instead it counts the number of objects consumed by a user.
This property counts all objects allocated on behalf of the user,
so it may differ from the results of system tools such as
.Nm df Fl i .
.Pp
When the property
.Sy xattr Ns = Ns Sy on
is set on a file system, additional objects will be created per file to store
extended attributes.
These additional objects are reflected in the
.Sy userobjused
value and are counted against the user's
.Sy userobjquota .
When a file system is configured to use
.Sy xattr Ns = Ns Sy sa
no additional internal objects are normally required.
.It Sy userrefs
This property is set to the number of user holds on this snapshot.
User holds are set by using the
.Nm zfs Cm hold
command.
.It Sy groupused Ns @ Ns Ar group
The amount of space consumed by the specified group in this dataset.
Space is charged to the group of each file, as displayed by
.Nm ls Fl l .
See the
.Sy userused Ns @ Ns Ar user
property for more information.
.Pp
Unprivileged users can only access their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupused
privilege with
.Nm zfs Cm allow ,
can access all groups' usage.
.It Sy groupobjused Ns @ Ns Ar group
The number of objects consumed by the specified group in this dataset.
Multiple objects may be charged to the group for each file when extended
attributes are in use.
See the
.Sy userobjused Ns @ Ns Ar user
property for more information.
.Pp
Unprivileged users can only access their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupobjused
privilege with
.Nm zfs Cm allow ,
can access all groups' usage.
.It Sy projectused Ns @ Ns Ar project
The amount of space consumed by the specified project in this dataset.
A project is identified by its project identifier (ID), a numeric
attribute stored per object.
When created, an object inherits the project ID of its parent object if the
parent's inherit-project-ID flag is set; that flag can be set and changed via
.Nm chattr Fl /+P
or
.Nm zfs project Fl s .
A privileged user can set and change an object's project ID via
.Nm chattr Fl p
or
.Nm zfs project Fl s
at any time.
Space is charged to the project of each file, as displayed by
.Nm lsattr Fl p
or
.Nm zfs project .
See the
.Sy userused Ns @ Ns Ar user
property for more information.
.Pp
The root user, or a user who has been granted the
.Sy projectused
privilege with
.Nm zfs allow ,
can access all projects' usage.
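.Pp
For example, a directory tree can be assigned to a placeholder project ID 100,
with the inherit flag set so that new files join the project,
and the project's usage can then be queried:
.Dl # Nm zfs Cm project Fl p Ar 100 Fl rs Pa /path/to/dir
.Dl # Nm zfs Cm get Sy projectused@100 Ar dataset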
.It Sy projectobjused Ns @ Ns Ar project
The
.Sy projectobjused
is similar to
.Sy projectused
but instead it counts the number of objects consumed by the project.
When the property
.Sy xattr Ns = Ns Sy on
is set on a file system, ZFS will create additional objects per file to store
extended attributes.
These additional objects are reflected in the
.Sy projectobjused
value and are counted against the project's
.Sy projectobjquota .
When a filesystem is configured to use
.Sy xattr Ns = Ns Sy sa
no additional internal objects are required.
See the
.Sy userobjused Ns @ Ns Ar user
property for more information.
.Pp
The root user, or a user who has been granted the
.Sy projectobjused
privilege with
.Nm zfs allow ,
can access all projects' objects usage.
.It Sy snapshots_changed
Provides a mechanism to quickly determine whether the snapshot list has
changed without having to mount a dataset or iterate the snapshot list.
Specifies the time at which a snapshot for a dataset was last
created or deleted.
.Pp
This allows consumers to be more efficient in how often they query snapshots.
The property is persistent across mount and unmount operations only if the
.Sy extensible_dataset
feature is enabled.
.It Sy volblocksize
For volumes, specifies the block size of the volume.
The
.Sy blocksize
cannot be changed once the volume has been written, so it should be set at
volume creation time.
The default
.Sy blocksize
for volumes is 16 Kbytes.
Any power of 2 from 512 bytes to 128 Kbytes is valid.
.Pp
This property can also be referred to by its shortened column name,
.Sy volblock .
.It Sy written
The amount of space
.Sy referenced
by this dataset, that was written since the previous snapshot
.Pq i.e. that is not referenced by the previous snapshot .
.It Sy written Ns @ Ns Ar snapshot
The amount of
.Sy referenced
space written to this dataset since the specified snapshot.
This is the space that is referenced by this dataset but was not referenced by
the specified snapshot.
.Pp
The
.Ar snapshot
may be specified as a short snapshot name
.Pq just the part after the Sy @ ,
in which case it will be interpreted as a snapshot in the same filesystem as
this dataset.
The
.Ar snapshot
may be a full snapshot name
.Pq Ar filesystem Ns @ Ns Ar snapshot ,
which for clones may be a snapshot in the origin's filesystem
.Pq or the origin of the origin's filesystem, etc.
.El
.Pp
The following native properties can be used to change the behavior of a ZFS
dataset.
.Bl -tag -width ""
.It Xo
.Sy aclinherit Ns = Ns Sy discard Ns | Ns Sy noallow Ns | Ns
.Sy restricted Ns | Ns Sy passthrough Ns | Ns Sy passthrough-x
.Xc
Controls how ACEs are inherited when files and directories are created.
.Bl -tag -compact -offset 4n -width "passthrough-x"
.It Sy discard
does not inherit any ACEs.
.It Sy noallow
only inherits inheritable ACEs that specify
.Qq deny
permissions.
.It Sy restricted
default, removes the
.Sy write_acl
and
.Sy write_owner
permissions when the ACE is inherited.
.It Sy passthrough
inherits all inheritable ACEs without any modifications.
.It Sy passthrough-x
same meaning as
.Sy passthrough ,
except that the
.Sy owner@ , group@ , No and Sy everyone@
ACEs inherit the execute permission only if the file creation mode also requests
the execute bit.
.El
.Pp
When the property value is set to
.Sy passthrough ,
files are created with a mode determined by the inheritable ACEs.
If no inheritable ACEs exist that affect the mode, then the mode is set in
accordance to the requested mode from the application.
.Pp
The
.Sy aclinherit
property does not apply to POSIX ACLs.
.It Xo
.Sy aclmode Ns = Ns Sy discard Ns | Ns Sy groupmask Ns | Ns
.Sy passthrough Ns | Ns Sy restricted Ns
.Xc
Controls how an ACL is modified during
.Xr chmod 2
and how inherited ACEs are modified by the file creation mode:
.Bl -tag -compact -offset 4n -width "passthrough"
.It Sy discard
default, deletes all
.Sy ACEs
except for those representing
the mode of the file or directory requested by
.Xr chmod 2 .
.It Sy groupmask
reduces permissions granted in all
.Sy ALLOW
entries found in the
.Sy ACL
such that they are no greater than the group permissions specified by
.Xr chmod 2 .
.It Sy passthrough
indicates that no changes are made to the ACL other than creating or updating
the necessary ACL entries to represent the new mode of the file or directory.
.It Sy restricted
will cause the
.Xr chmod 2
operation to return an error when used on any file or directory which has
a non-trivial ACL whose entries can not be represented by a mode.
.Xr chmod 2
is required to change the set user ID, set group ID, or sticky bits on a file
or directory, as they do not have equivalent ACL entries.
In order to use
.Xr chmod 2
on a file or directory with a non-trivial ACL when
.Sy aclmode
is set to
.Sy restricted ,
you must first remove all ACL entries which do not represent the current mode.
.El
.It Sy acltype Ns = Ns Sy off Ns | Ns Sy nfsv4 Ns | Ns Sy posix
Controls whether ACLs are enabled and if so what type of ACL to use.
When this property is set to a type of ACL not supported by the current
platform, the behavior is the same as if it were set to
.Sy off .
.Bl -tag -compact -offset 4n -width "posixacl"
.It Sy off
default on Linux; when a file system has the
.Sy acltype
property set to off, ACLs are disabled.
.It Sy noacl
an alias for
.Sy off
.It Sy nfsv4
default on
.Fx ,
indicates that NFSv4-style ZFS ACLs should be used.
These ACLs can be managed with the
.Xr getfacl 1
and
.Xr setfacl 1 .
The
.Sy nfsv4
ZFS ACL type is not yet supported on Linux.
.It Sy posix
indicates POSIX ACLs should be used.
POSIX ACLs are specific to Linux and are not functional on other platforms.
POSIX ACLs are stored as an extended
attribute and therefore will not overwrite any existing NFSv4 ACLs which
may be set.
.It Sy posixacl
an alias for
.Sy posix
.El
.Pp
To obtain the best performance when setting
.Sy posix ,
users are strongly encouraged to set the
.Sy xattr Ns = Ns Sy sa
property.
This will result in the POSIX ACL being stored more efficiently on disk.
But as a consequence, all new extended attributes will only be
accessible from OpenZFS implementations which support the
.Sy xattr Ns = Ns Sy sa
property.
See the
.Sy xattr
property for more details.
.It Sy atime Ns = Ns Sy on Ns | Ns Sy off
Controls whether the access time for files is updated when they are read.
Turning this property off avoids producing write traffic when reading files and
can result in significant performance gains, though it might confuse mailers
and other similar utilities.
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy atime
and
.Sy noatime
mount options.
The default value is
.Sy on .
See also
.Sy relatime
below.
.It Sy canmount Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy noauto
If this property is set to
.Sy off ,
the file system cannot be mounted, and is ignored by
.Nm zfs Cm mount Fl a .
Setting this property to
.Sy off
is similar to setting the
.Sy mountpoint
property to
.Sy none ,
except that the dataset still has a normal
.Sy mountpoint
property, which can be inherited.
Setting this property to
.Sy off
allows datasets to be used solely as a mechanism to inherit properties.
One example of setting
.Sy canmount Ns = Ns Sy off
is to have two datasets with the same
.Sy mountpoint ,
so that the children of both datasets appear in the same directory, but might
have different inherited characteristics.
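.Pp
A minimal sketch of that example, with illustrative pool and dataset names:
.Dl # Nm zfs Cm create Fl o Sy canmount Ns = Ns Sy off Fl o Sy mountpoint Ns = Ns Pa /export/home Ar rpool/HOME
.Dl # Nm zfs Cm create Fl o Sy canmount Ns = Ns Sy off Fl o Sy mountpoint Ns = Ns Pa /export/home Ar bpool/HOME
.Pp
Children of both datasets will then appear under
.Pa /export/home .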
.Pp
When set to
.Sy noauto ,
a dataset can only be mounted and unmounted explicitly.
The dataset is not mounted automatically when the dataset is created or
imported, nor is it mounted by the
.Nm zfs Cm mount Fl a
command or unmounted by the
.Nm zfs Cm unmount Fl a
command.
.Pp
This property is not inherited.
.It Xo
.Sy checksum Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy fletcher2 Ns | Ns
.Sy fletcher4 Ns | Ns Sy sha256 Ns | Ns Sy noparity Ns | Ns
.Sy sha512 Ns | Ns Sy skein Ns | Ns Sy edonr Ns | Ns Sy blake3
.Xc
Controls the checksum used to verify data integrity.
The default value is
.Sy on ,
which automatically selects an appropriate algorithm
.Po currently,
.Sy fletcher4 ,
but this may change in future releases
.Pc .
The value
.Sy off
disables integrity checking on user data.
The value
.Sy noparity
not only disables integrity but also disables maintaining parity for user data.
This setting is used internally by a dump device residing on a RAID-Z pool and
should not be used by any other dataset.
Disabling checksums is
.Em NOT
a recommended practice.
.Pp
The
.Sy sha512 ,
.Sy skein ,
.Sy edonr ,
and
.Sy blake3
checksum algorithms require enabling the appropriate features on the pool.
.Pp
Please see
.Xr zpool-features 7
for more information on these algorithms.
.Pp
Changing this property affects only newly-written data.
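.Pp
For example, assuming the
.Sy sha512
feature is enabled on the pool, a specific algorithm can be selected for
newly-written data with:
.Dl # Nm zfs Cm set Sy checksum Ns = Ns Sy sha512 Ar dataset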
.It Xo
.Sy compression Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy gzip Ns | Ns
.Sy gzip- Ns Ar N Ns | Ns Sy lz4 Ns | Ns Sy lzjb Ns | Ns Sy zle Ns | Ns Sy zstd Ns | Ns
.Sy zstd- Ns Ar N Ns | Ns Sy zstd-fast Ns | Ns Sy zstd-fast- Ns Ar N
.Xc
Controls the compression algorithm used for this dataset.
.Pp
When set to
.Sy on
(the default), indicates that the current default compression algorithm should
be used.
The default balances compression and decompression speed with compression
ratio, and is expected to work well on a wide variety of workloads.
Unlike all other settings for this property,
.Sy on
does not select a fixed compression type.
As new compression algorithms are added to ZFS and enabled on a pool, the
default compression algorithm may change.
The current default compression algorithm is either
.Sy lzjb
or, if the
.Sy lz4_compress
feature is enabled,
.Sy lz4 .
.Pp
The
.Sy lz4
compression algorithm is a high-performance replacement for the
.Sy lzjb
algorithm.
It features significantly faster compression and decompression, as well as a
moderately higher compression ratio than
.Sy lzjb ,
but can only be used on pools with the
.Sy lz4_compress
feature set to
.Sy enabled .
See
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy lz4_compress
feature.
.Pp
The
.Sy lzjb
compression algorithm is optimized for performance while providing decent data
compression.
.Pp
The
.Sy gzip
compression algorithm uses the same compression as the
.Xr gzip 1
command.
You can specify the
.Sy gzip
level by using the value
.Sy gzip- Ns Ar N ,
where
.Ar N
is an integer from 1
.Pq fastest
to 9
.Pq best compression ratio .
Currently,
.Sy gzip
is equivalent to
.Sy gzip-6
.Po which is also the default for
.Xr gzip 1
.Pc .
.Pp
The
.Sy zstd
compression algorithm provides both high compression ratios and good
performance.
You can specify the
.Sy zstd
level by using the value
.Sy zstd- Ns Ar N ,
where
.Ar N
is an integer from 1
.Pq fastest
to 19
.Pq best compression ratio .
.Sy zstd
is equivalent to
.Sy zstd-3 .
.Pp
Faster speeds at the cost of the compression ratio can be requested by
setting a negative
.Sy zstd
level.
This is done using
.Sy zstd-fast- Ns Ar N ,
where
.Ar N
is an integer in
.Bq Sy 1 Ns - Ns Sy 10 , 20 , 30 , No … , Sy 100 , 500 , 1000
which maps to a negative
.Sy zstd
level.
The lower the level the faster the compression \(em
.Sy 1000
provides the fastest compression and lowest compression ratio.
.Sy zstd-fast
is equivalent to
.Sy zstd-fast- Ns Ar 1 .
.Pp
The
.Sy zle
compression algorithm compresses runs of zeros.
.Pp
This property can also be referred to by its shortened column name
.Sy compress .
Changing this property affects only newly-written data.
.Pp
When any setting except
.Sy off
is selected, compression will explicitly check for blocks consisting of only
zeroes (the NUL byte).
When a zero-filled block is detected, it is stored as
a hole and not compressed using the indicated compression algorithm.
.Pp
Any block being compressed must be no larger than 7/8 of its original size
after compression; otherwise, the compression will not be considered worthwhile
and the block will be saved uncompressed.
Note that when the logical block is less than
8 times the disk sector size this effectively reduces the necessary compression
ratio; for example, 8 KiB blocks on disks with 4 KiB disk sectors must compress
to 1/2
or less of their original size.
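.Pp
For example, assuming the
.Sy zstd_compress
feature is enabled on the pool, a high-ratio level can be selected with:
.Dl # Nm zfs Cm set Sy compression Ns = Ns Sy zstd-19 Ar dataset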
.It Xo
.Sy context Ns = Ns Sy none Ns | Ns
.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux context for all files in the file system under
a mount point for that file system.
See
.Xr selinux 8
for more information.
.It Xo
.Sy fscontext Ns = Ns Sy none Ns | Ns
.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux context for the file system being mounted.
See
.Xr selinux 8
for more information.
.It Xo
.Sy defcontext Ns = Ns Sy none Ns | Ns
.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux default context for unlabeled files.
See
.Xr selinux 8
for more information.
.It Xo
.Sy rootcontext Ns = Ns Sy none Ns | Ns
.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux context for the root inode of the file system.
See
.Xr selinux 8
for more information.
.It Sy copies Ns = Ns Sy 1 Ns | Ns Sy 2 Ns | Ns Sy 3
Controls the number of copies of data stored for this dataset.
These copies are in addition to any redundancy provided by the pool, for
example, mirroring or RAID-Z.
The copies are stored on different disks, if possible.
The space used by multiple copies is charged to the associated file and dataset,
changing the
.Sy used
property and counting against quotas and reservations.
.Pp
Changing this property only affects newly-written data.
Therefore, set this property at file system creation time by using the
.Fl o Sy copies Ns = Ns Ar N
option.
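.Pp
For example, with
.Ar pool/important
as a placeholder dataset:
.Dl # Nm zfs Cm create Fl o Sy copies Ns = Ns Ar 2 Ar pool/important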
.Pp
Remember that ZFS will not import a pool with a missing top-level vdev.
Do
.Em NOT
create, for example a two-disk striped pool and set
.Sy copies Ns = Ns Ar 2
on some datasets thinking you have setup redundancy for them.
When a disk fails you will not be able to import the pool
and will have lost all of your data.
.Pp
Encrypted datasets may not have
.Sy copies Ns = Ns Ar 3
since the implementation stores some encryption metadata where the third copy
would normally be.
.It Sy devices Ns = Ns Sy on Ns | Ns Sy off
Controls whether device nodes can be opened on this file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy dev
and
.Sy nodev
mount options.
.It Xo
.Sy dedup Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy verify Ns | Ns
.Sy sha256 Ns Oo , Ns Sy verify Oc Ns | Ns Sy sha512 Ns Oo , Ns Sy verify Oc Ns | Ns Sy skein Ns Oo , Ns Sy verify Oc Ns | Ns
.Sy edonr , Ns Sy verify Ns | Ns Sy blake3 Ns Oo , Ns Sy verify Oc Ns
.Xc
Configures deduplication for a dataset.
The default value is
.Sy off .
The default deduplication checksum is
.Sy sha256
(this may change in the future).
When
.Sy dedup
is enabled, the checksum defined here overrides the
.Sy checksum
property.
Setting the value to
.Sy verify
has the same effect as the setting
.Sy sha256 , Ns Sy verify .
.Pp
If set to
.Sy verify ,
ZFS will do a byte-by-byte comparison of any two blocks that have the same
signature to make sure the block contents are identical.
Specifying
.Sy verify
is mandatory for the
.Sy edonr
algorithm.
.Pp
Unless necessary, deduplication should
.Em not
be enabled on a system.
See the
.Sx Deduplication
section of
.Xr zfsconcepts 7 .
.It Xo
.Sy dnodesize Ns = Ns Sy legacy Ns | Ns Sy auto Ns | Ns Sy 1k Ns | Ns
.Sy 2k Ns | Ns Sy 4k Ns | Ns Sy 8k Ns | Ns Sy 16k
.Xc
Specifies a compatibility mode or literal value for the size of dnodes in the
file system.
The default value is
.Sy legacy .
Setting this property to a value other than
.Sy legacy No requires the Sy large_dnode No pool feature to be enabled .
.Pp
Consider setting
.Sy dnodesize
to
.Sy auto
if the dataset uses the
.Sy xattr Ns = Ns Sy sa
property setting and the workload makes heavy use of extended attributes.
This
may be applicable to SELinux-enabled systems, Lustre servers, and Samba
servers, for example.
Literal values are supported for cases where the optimal
size is known in advance and for performance testing.
.Pp
Leave
.Sy dnodesize
set to
.Sy legacy
if you need to receive a send stream of this dataset on a pool that doesn't
enable the
.Sy large_dnode
feature, or if you need to import this pool on a system that doesn't support the
.Sy large_dnode No feature .
.Pp
This property can also be referred to by its shortened column name,
.Sy dnsize .
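.Pp
For example, on a pool with the
.Sy large_dnode
feature enabled, a dataset making heavy use of extended attributes might use:
.Dl # Nm zfs Cm set Sy dnodesize Ns = Ns Sy auto Ar dataset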
.It Xo
.Sy encryption Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy aes-128-ccm Ns | Ns
.Sy aes-192-ccm Ns | Ns Sy aes-256-ccm Ns | Ns Sy aes-128-gcm Ns | Ns
.Sy aes-192-gcm Ns | Ns Sy aes-256-gcm
.Xc
Controls the encryption cipher suite (block cipher, key length, and mode) used
for this dataset.
Requires the
.Sy encryption
feature to be enabled on the pool.
Requires a
.Sy keyformat
to be set at dataset creation time.
.Pp
Selecting
.Sy encryption Ns = Ns Sy on
when creating a dataset indicates that the default encryption suite will be
selected, which is currently
.Sy aes-256-gcm .
In order to provide consistent data protection, encryption must be specified at
dataset creation time and it cannot be changed afterwards.
.Pp
For more details and caveats about encryption see the
.Sx Encryption
section of
.Xr zfs-load-key 8 .
.It Sy keyformat Ns = Ns Sy raw Ns | Ns Sy hex Ns | Ns Sy passphrase
Controls what format the user's encryption key will be provided as.
This property is only set when the dataset is encrypted.
.Pp
Raw keys and hex keys must be 32 bytes long (regardless of the chosen
encryption suite) and must be randomly generated.
A raw key can be generated with the following command:
.Dl # Nm dd Sy if=/dev/urandom bs=32 count=1 Sy of= Ns Pa /path/to/output/key
.Pp
Passphrases must be between 8 and 512 bytes long and will be processed through
PBKDF2 before being used (see the
.Sy pbkdf2iters
property).
Even though the encryption suite cannot be changed after dataset creation,
the keyformat can be with
.Nm zfs Cm change-key .
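.Pp
For example, an encrypted dataset keyed by a passphrase can be created with:
.Dl # Nm zfs Cm create Fl o Sy encryption Ns = Ns Sy on Fl o Sy keyformat Ns = Ns Sy passphrase Ar pool/secret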
.It Xo
.Sy keylocation Ns = Ns Sy prompt Ns | Ns Sy file:// Ns Ar /absolute/file/path Ns | Ns Sy https:// Ns Ar address Ns | Ns Sy http:// Ns Ar address
.Xc
Controls where the user's encryption key will be loaded from by default for
commands such as
.Nm zfs Cm load-key
and
.Nm zfs Cm mount Fl l .
This property is only set for encrypted datasets which are encryption roots.
If unspecified, the default is
.Sy prompt .
.Pp
Even though the encryption suite cannot be changed after dataset creation, the
keylocation can be with either
.Nm zfs Cm set
or
.Nm zfs Cm change-key .
If
.Sy prompt
is selected ZFS will ask for the key at the command prompt when it is required
to access the encrypted data (see
.Nm zfs Cm load-key
for details).
This setting will also allow the key to be passed in via the standard input
stream,
but users should be careful not to place keys which should be kept secret on
the command line.
If a file URI is selected, the key will be loaded from the
specified absolute file path.
If an HTTPS or HTTP URL is selected, it will be fetched with an HTTP GET using
.Xr fetch 3 ,
libcurl, or nothing, depending on compile-time configuration and run-time
availability.
The
.Sy SSL_CA_CERT_FILE
environment variable can be set to specify the location
of the concatenated certificate store.
The
.Sy SSL_CA_CERT_PATH
environment variable can be set to override the location
of the directory containing the certificate authority bundle.
The
.Sy SSL_CLIENT_CERT_FILE
and
.Sy SSL_CLIENT_KEY_FILE
environment variables can be set to configure the path
to the client certificate and its key.
.It Sy pbkdf2iters Ns = Ns Ar iterations
Controls the number of PBKDF2 iterations that a
.Sy passphrase
encryption key should be run through when processing it into an encryption key.
This property is only defined when encryption is enabled and a keyformat of
.Sy passphrase
is selected.
The goal of PBKDF2 is to significantly increase the
computational difficulty needed to brute force a user's passphrase.
This is accomplished by forcing the attacker to run each passphrase through a
computationally expensive hashing function many times before they arrive at the
resulting key.
A user who actually knows the passphrase will only have to pay this cost once.
As CPUs become better at processing, this number should be
raised to ensure that a brute force attack is still not possible.
The current default is
.Sy 350000
and the minimum is
.Sy 100000 .
This property may be changed with
.Nm zfs Cm change-key .
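.Pp
For example, the iteration count for a passphrase-encrypted dataset can be
raised with:
.Dl # Nm zfs Cm change-key Fl o Sy pbkdf2iters Ns = Ns Ar 1000000 Ar dataset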
.It Sy exec Ns = Ns Sy on Ns | Ns Sy off
Controls whether processes can be executed from within this file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy exec
and
.Sy noexec
mount options.
.It Sy filesystem_limit Ns = Ns Ar count Ns | Ns Sy none
Limits the number of filesystems and volumes that can exist under this point in
the dataset tree.
The limit is not enforced if the user is allowed to change the limit.
Setting a
.Sy filesystem_limit
on a descendent of a filesystem that already has a
.Sy filesystem_limit
does not override the ancestor's
.Sy filesystem_limit ,
but rather imposes an additional limit.
This feature must be enabled to be used
.Po see
.Xr zpool-features 7
.Pc .
.It Sy special_small_blocks Ns = Ns Ar size
This value represents the threshold block size for including small file
blocks into the special allocation class.
Blocks smaller than or equal to this
value will be assigned to the special allocation class, while larger blocks
will be assigned to the regular class.
Valid values are zero or a power of two from 512 up to 1048576 (1 MiB).
The default size is 0, which means no small file blocks
will be allocated in the special class.
.Pp
Before setting this property, a special class vdev must be added to the
pool.
See
.Xr zpoolconcepts 7
for more details on the special allocation class.
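.Pp
For example, assuming the pool has a special class vdev, small blocks up to
32 KiB can be directed to it with:
.Dl # Nm zfs Cm set Sy special_small_blocks Ns = Ns Ar 32K Ar dataset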
.It Sy mountpoint Ns = Ns Pa path Ns | Ns Sy none Ns | Ns Sy legacy
Controls the mount point used for this file system.
See the
.Sx Mount Points
section of
.Xr zfsconcepts 7
for more information on how this property is used.
.Pp
When the
.Sy mountpoint
property is changed for a file system, the file system and any children that
inherit the mount point are unmounted.
If the new value is
.Sy legacy ,
then they remain unmounted.
Otherwise, they are automatically remounted in the new location if the property
was previously
.Sy legacy
or
.Sy none ,
or if they were mounted before the property was changed.
In addition, any shared file systems are unshared and shared in the new
location.
.It Sy nbmand Ns = Ns Sy on Ns | Ns Sy off
Controls whether the file system should be mounted with
.Sy nbmand
.Pq Non-blocking mandatory locks .
Changes to this property only take effect when the file system is unmounted and
remounted.
This was only supported by Linux prior to 5.15, and was buggy there,
and is not supported by
.Fx .
On Solaris it's used for SMB clients.
.It Sy overlay Ns = Ns Sy on Ns | Ns Sy off
Allow mounting on a busy directory or a directory which already contains
files or directories.
This is the default mount behavior for Linux and
.Fx
file systems.
On these platforms the property is
.Sy on
by default.
Set to
.Sy off
to disable overlay mounts for consistency with OpenZFS on other platforms.
.It Sy primarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
Controls what is cached in the primary cache
.Pq ARC .
If this property is set to
.Sy all ,
then both user data and metadata is cached.
If this property is set to
.Sy none ,
then neither user data nor metadata is cached.
If this property is set to
.Sy metadata ,
then only metadata is cached.
The default value is
.Sy all .
.It Sy quota Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space a dataset and its descendents can consume.
This property enforces a hard limit on the amount of space used.
This includes all space consumed by descendents, including file systems and
snapshots.
Setting a quota on a descendent of a dataset that already has a quota does not
override the ancestor's quota, but rather imposes an additional limit.
.Pp
Quotas cannot be set on volumes, as the
.Sy volsize
property acts as an implicit quota.
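.Pp
For example, with
.Ar pool/home
as a placeholder dataset:
.Dl # Nm zfs Cm set Sy quota Ns = Ns Ar 100G Ar pool/home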
.It Sy snapshot_limit Ns = Ns Ar count Ns | Ns Sy none
Limits the number of snapshots that can be created on a dataset and its
descendents.
Setting a
.Sy snapshot_limit
on a descendent of a dataset that already has a
.Sy snapshot_limit
does not override the ancestor's
.Sy snapshot_limit ,
but rather imposes an additional limit.
The limit is not enforced if the user is allowed to change the limit.
For example, this means that recursive snapshots taken from the global zone are
counted against each delegated dataset within a zone.
This feature must be enabled to be used
.Po see
.Xr zpool-features 7
.Pc .
.It Sy userquota@ Ns Ar user Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified user.
User space consumption is identified by the
.Sy userspace@ Ns Ar user
property.
.Pp
Enforcement of user quotas may be delayed by several seconds.
This delay means that a user might exceed their quota before the system notices
that they are over quota and begins to refuse additional writes with the
.Er EDQUOT
error message.
See the
.Nm zfs Cm userspace
command for more information.
.Pp
Unprivileged users can only access their own space usage.
The root user, or a user who has been granted the
.Sy userquota
privilege with
.Nm zfs Cm allow ,
can get and set everyone's quota.
.Pp
This property is not available on volumes, on file systems before version 4, or
on pools before version 15.
The
.Sy userquota@ Ns Ar …
properties are not displayed by
.Nm zfs Cm get Sy all .
The user's name must be appended after the
.Sy @
symbol, using one of the following forms:
.Bl -bullet -compact -offset 4n
.It
POSIX name
.Pq Qq joe
.It
POSIX numeric ID
.Pq Qq 789
.It
SID name
.Pq Qq joe.smith@mydomain
.It
SID numeric ID
.Pq Qq S-1-123-456-789
.El
.Pp
Files created on Linux always have POSIX owners.
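.Pp
For example, to limit a placeholder POSIX user
.Qq joe
to 50 GiB on a dataset:
.Dl # Nm zfs Cm set Sy userquota@joe Ns = Ns Ar 50G Ar pool/home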
.It Sy userobjquota@ Ns Ar user Ns = Ns Ar size Ns | Ns Sy none
The
.Sy userobjquota
is similar to
.Sy userquota
but it limits the number of objects a user can create.
Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy groupquota@ Ns Ar group Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified group.
Group space consumption is identified by the
.Sy groupused@ Ns Ar group
property.
.Pp
Unprivileged users can access only their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupquota
privilege with
.Nm zfs Cm allow ,
can get and set all groups' quotas.
.It Sy groupobjquota@ Ns Ar group Ns = Ns Ar size Ns | Ns Sy none
The
.Sy groupobjquota
is similar to
.Sy groupquota
but it limits the number of objects a group can consume.
Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy projectquota@ Ns Ar project Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified project.
Project space consumption is identified by the
.Sy projectused@ Ns Ar project
property.
Please refer to
.Sy projectused
for more information about how a project is identified, set, and changed.
.Pp
The root user, or a user who has been granted the
.Sy projectquota
privilege with
.Nm zfs allow ,
can access all projects' quota.
.It Sy projectobjquota@ Ns Ar project Ns = Ns Ar size Ns | Ns Sy none
The
.Sy projectobjquota
is similar to
.Sy projectquota
but it limits the number of objects a project can consume.
Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
Controls whether this dataset can be modified.
The default value is
.Sy off .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy ro
and
.Sy rw
mount options.
.Pp
This property can also be referred to by its shortened column name,
.Sy rdonly .
.It Sy recordsize Ns = Ns Ar size
Specifies a suggested block size for files in the file system.
This property is designed solely for use with database workloads that access
files in fixed-size records.
ZFS automatically tunes block sizes according to internal algorithms optimized
for typical access patterns.
.Pp
For databases that create very large files but access them in small random
chunks, these algorithms may be suboptimal.
Specifying a
.Sy recordsize
greater than or equal to the record size of the database can result in
significant performance gains.
Use of this property for general purpose file systems is strongly discouraged,
and may adversely affect performance.
.Pp
The size specified must be a power of two greater than or equal to
.Ar 512 B
and less than or equal to
.Ar 128 KiB .
If the
.Sy large_blocks
feature is enabled on the pool, the size may be up to
.Ar 1 MiB .
See
.Xr zpool-features 7
for details on ZFS feature flags.
.Pp
Changing the file system's
.Sy recordsize
affects only files created afterward; existing files are unaffected.
.Pp
This property can also be referred to by its shortened column name,
.Sy recsize .
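.Pp
For example, for a database that does I/O in fixed 16 KiB records:
.Dl # Nm zfs Cm set Sy recordsize Ns = Ns Ar 16K Ar pool/db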
.It Sy redundant_metadata Ns = Ns Sy all Ns | Ns Sy most Ns | Ns Sy some Ns | Ns Sy none
Controls what types of metadata are stored redundantly.
ZFS stores an extra copy of metadata, so that if a single block is corrupted,
the amount of user data lost is limited.
This extra copy is in addition to any redundancy provided at the pool level
.Pq e.g. by mirroring or RAID-Z ,
and is in addition to an extra copy specified by the
.Sy copies
property
.Pq up to a total of 3 copies .
For example, if the pool is mirrored,
.Sy copies Ns = Ns 2 ,
and
.Sy redundant_metadata Ns = Ns Sy most ,
then ZFS stores 6 copies of most metadata, and 4 copies of data and some
metadata.
.Pp
When set to
.Sy all ,
ZFS stores an extra copy of all metadata.
If a single on-disk block is corrupt, at worst a single block of user data
.Po which is
.Sy recordsize
bytes long
.Pc
can be lost.
.Pp
When set to
.Sy most ,
ZFS stores an extra copy of most types of metadata.
This can improve performance of random writes, because less metadata must be
written.
In practice, at worst about 1000 blocks
.Po of
.Sy recordsize
bytes each
.Pc
of user data can be lost if a single on-disk block is corrupt.
The exact behavior of which metadata blocks are stored redundantly may change in
future releases.
.Pp
When set to
.Sy some ,
ZFS stores an extra copy of only critical metadata.
This can improve file create performance since less metadata
needs to be written.
If a single on-disk block is corrupt, at worst a single user file can be lost.
.Pp
When set to
.Sy none ,
ZFS does not store any copies of metadata redundantly.
If a single on-disk block is corrupt, an entire dataset can be lost.
.Pp
The default value is
.Sy all .
.It Sy refquota Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space a dataset can consume.
This property enforces a hard limit on the amount of space used.
This hard limit does not include space used by descendents, including file
systems and snapshots.
.It Sy refreservation Ns = Ns Ar size Ns | Ns Sy none Ns | Ns Sy auto
The minimum amount of space guaranteed to a dataset, not including its
descendents.
When the amount of space used is below this value, the dataset is treated as if
it were taking up the amount of space specified by
.Sy refreservation .
The
.Sy refreservation
reservation is accounted for in the parent datasets' space used, and counts
against the parent datasets' quotas and reservations.
.Pp
If
.Sy refreservation
is set, a snapshot is only allowed if there is enough free pool space outside of
this reservation to accommodate the current number of
.Qq referenced
bytes in the dataset.
.Pp
If
.Sy refreservation
is set to
.Sy auto ,
a volume is thick provisioned
.Po or
.Qq not sparse
.Pc .
.Sy refreservation Ns = Ns Sy auto
is only supported on volumes.
See
.Sy volsize
in the
.Sx Native Properties
section for more information about sparse volumes.
.Pp
This property can also be referred to by its shortened column name,
.Sy refreserv .
.It Sy relatime Ns = Ns Sy on Ns | Ns Sy off
Controls the manner in which the access time is updated when
.Sy atime Ns = Ns Sy on
is set.
Turning this property on causes the access time to be updated relative
to the modify or change time.
Access time is only updated if the previous
access time was earlier than the current modify or change time or if the
existing access time hasn't been updated within the past 24 hours.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy relatime
and
.Sy norelatime
mount options.
.It Sy reservation Ns = Ns Ar size Ns | Ns Sy none
The minimum amount of space guaranteed to a dataset and its descendants.
When the amount of space used is below this value, the dataset is treated as if
it were taking up the amount of space specified by its reservation.
Reservations are accounted for in the parent datasets' space used, and count
against the parent datasets' quotas and reservations.
.Pp
This property can also be referred to by its shortened column name,
.Sy reserv .
.It Sy secondarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
Controls what is cached in the secondary cache
.Pq L2ARC .
If this property is set to
.Sy all ,
then both user data and metadata is cached.
If this property is set to
.Sy none ,
then neither user data nor metadata is cached.
If this property is set to
.Sy metadata ,
then only metadata is cached.
The default value is
.Sy all .
.It Sy setuid Ns = Ns Sy on Ns | Ns Sy off
Controls whether the setuid bit is respected for the file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy suid
and
.Sy nosuid
mount options.
.It Sy sharesmb Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Ar opts
Controls whether the file system is shared by using
.Sy Samba USERSHARES
and what options are to be used.
Otherwise, the file system is automatically shared and unshared with the
.Nm zfs Cm share
and
.Nm zfs Cm unshare
commands.
If the property is set to on, the
.Xr net 8
command is invoked to create a
.Sy USERSHARE .
.Pp
Because SMB shares require a resource name, a unique resource name is
constructed from the dataset name.
The constructed name is a copy of the
dataset name, except that any characters which would be invalid in the
resource name are replaced with underscore (_) characters.
Linux does not currently support additional options which might be available
on Solaris.
.Pp
If the
.Sy sharesmb
property is set to
.Sy off ,
the file systems are unshared.
.Pp
The share is created with the ACL (Access Control List) "Everyone:F" ("F"
stands for "full permissions", i.e. read and write permissions) and no guest
access (which means Samba must be able to authenticate a real user \(em
.Xr passwd 5 Ns / Ns Xr shadow 5 Ns - ,
LDAP- or
.Xr smbpasswd 5 Ns -based )
by default.
This means that any additional access control
(such as disallowing access for specific users) must be done on the underlying
file system.
.It Sy sharenfs Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Ar opts
Controls whether the file system is shared via NFS, and what options are to be
used.
A file system with a
.Sy sharenfs
property of
.Sy off
is managed with the
.Xr exportfs 8
command and entries in the
.Pa /etc/exports
file.
Otherwise, the file system is automatically shared and unshared with the
.Nm zfs Cm share
and
.Nm zfs Cm unshare
commands.
If the property is set to
.Sy on ,
the dataset is shared using the default options:
.Dl sec=sys,rw,crossmnt,no_subtree_check
.Pp
Please note that the options are comma-separated, unlike those found in
.Xr exports 5 .
This is done to negate the need for quoting, as well as to make parsing
with scripts easier.
.Pp
See
.Xr exports 5
for the meaning of the default options.
Otherwise, the
.Xr exportfs 8
command is invoked with options equivalent to the contents of this property.
.Pp
When the
.Sy sharenfs
property is changed for a dataset, the dataset and any children inheriting the
property are re-shared with the new options, only if the property was previously
.Sy off ,
or if they were shared before the property was changed.
If the new property is
.Sy off ,
the file systems are unshared.
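.Pp
For example, a dataset can be exported with the default options via:
.Dl # Nm zfs Cm set Sy sharenfs Ns = Ns Sy on Ar dataset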
.It Sy logbias Ns = Ns Sy latency Ns | Ns Sy throughput
Provide a hint to ZFS about handling of synchronous requests in this dataset.
If
.Sy logbias
is set to
.Sy latency
.Pq the default ,
ZFS will use pool log devices
.Pq if configured
to handle the requests at low latency.
If
.Sy logbias
is set to
.Sy throughput ,
ZFS will not use configured pool log devices.
ZFS will instead optimize synchronous operations for global pool throughput and
efficient use of resources.
.It Sy snapdev Ns = Ns Sy hidden Ns | Ns Sy visible
Controls whether the volume snapshot devices under
.Pa /dev/zvol/ Ns Aq Ar pool
are hidden or visible.
The default value is
.Sy hidden .
.It Sy snapdir Ns = Ns Sy hidden Ns | Ns Sy visible
Controls whether the
.Pa .zfs
directory is hidden or visible in the root of the file system as discussed in
the
.Sx Snapshots
section of
.Xr zfsconcepts 7 .
The default value is
.Sy hidden .
.It Sy sync Ns = Ns Sy standard Ns | Ns Sy always Ns | Ns Sy disabled
Controls the behavior of synchronous requests
.Pq e.g. fsync, O_DSYNC .
.Sy standard
is the POSIX-specified behavior of ensuring all synchronous requests
are written to stable storage and all devices are flushed to ensure
data is not cached by device controllers
.Pq this is the default .
.Sy always
causes every file system transaction to be written and flushed before its
system call returns.
This has a large performance penalty.
.Sy disabled
disables synchronous requests.
File system transactions are only committed to stable storage periodically.
This option will give the highest performance.
However, it is very dangerous as ZFS would be ignoring the synchronous
transaction demands of applications such as databases or NFS.
Administrators should only use this option when the risks are understood.
.It Sy version Ns = Ns Ar N Ns | Ns Sy current
The on-disk version of this file system, which is independent of the pool
version.
This property can only be set to later supported versions.
See the
.Nm zfs Cm upgrade
command.
.It Sy volsize Ns = Ns Ar size
For volumes, specifies the logical size of the volume.
By default, creating a volume establishes a reservation of equal size.
For storage pools with a version number of 9 or higher, a
.Sy refreservation
is set instead.
Any changes to
.Sy volsize
are reflected in an equivalent change to the reservation
.Pq or Sy refreservation .
The
.Sy volsize
can only be set to a multiple of
.Sy volblocksize ,
and cannot be zero.
.Pp
The reservation is kept equal to the volume's logical size to prevent unexpected
behavior for consumers.
Without the reservation, the volume could run out of space, resulting in
undefined behavior or data corruption, depending on how the volume is used.
These effects can also occur when the volume size is changed while it is in use
.Pq particularly when shrinking the size .
Extreme care should be used when adjusting the volume size.
.Pp
Though not recommended, a
.Qq sparse volume
.Po also known as
.Qq thin provisioned
.Pc
can be created by specifying the
.Fl s
option to the
.Nm zfs Cm create Fl V
command, or by changing the value of the
.Sy refreservation
property
.Po or
.Sy reservation
property on pool version 8 or earlier
.Pc
after the volume has been created.
A
.Qq sparse volume
is a volume where the value of
.Sy refreservation
is less than the size of the volume plus the space required to store its
metadata.
Consequently, writes to a sparse volume can fail with
.Er ENOSPC
when the pool is low on space.
For a sparse volume, changes to
.Sy volsize
are not reflected in the
.Sy refreservation .
A volume that is not sparse is said to be
.Qq thick provisioned .
A sparse volume can become thick provisioned by setting
.Sy refreservation
to
.Sy auto .
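.Pp
For example, a sparse volume can be created and later converted to thick
provisioning
.Pq the volume name is illustrative :
.Dl # Nm zfs Cm create Fl s Fl V Ar 100G Ar pool/vol
.Dl # Nm zfs Cm set Sy refreservation Ns = Ns Sy auto Ar pool/vol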
.It Sy volmode Ns = Ns Sy default Ns | Ns Sy full Ns | Ns Sy geom Ns | Ns Sy dev Ns | Ns Sy none
This property specifies how volumes should be exposed to the OS.
Setting it to
.Sy full
exposes volumes as fully fledged block devices, providing maximal
functionality.
The value
.Sy geom
is just an alias for
.Sy full
and is kept for compatibility.
Setting it to
.Sy dev
hides its partitions.
Volumes with this property set to
.Sy none
are not exposed outside ZFS, but can be snapshotted, cloned, replicated, and
so on, which can be suitable for backup purposes.
The value
.Sy default
means that volume exposure is controlled by the system-wide tunable
.Sy zvol_volmode ,
where
.Sy full ,
.Sy dev
and
.Sy none
are encoded as 1, 2 and 3 respectively.
The default value is
.Sy full .
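.Pp
For example
.Pq with a hypothetical volume name ,
to expose a volume as a raw device while hiding its partitions:
.Dl # Nm zfs Cm set Sy volmode Ns = Ns Sy dev Ar pool/vol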
.It Sy vscan Ns = Ns Sy on Ns | Ns Sy off
Controls whether regular files should be scanned for viruses when a file is
opened and closed.
In addition to enabling this property, the virus scan service must also be
enabled for virus scanning to occur.
The default value is
.Sy off .
This property is not used by OpenZFS.
.It Sy xattr Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy sa
Controls whether extended attributes are enabled for this file system.
Two styles of extended attributes are supported: either directory-based
or system-attribute-based.
.Pp
The default value of
.Sy on
enables directory-based extended attributes.
This style of extended attribute imposes no practical limit
on either the size or number of attributes which can be set on a file,
although under Linux the
.Xr getxattr 2
and
.Xr setxattr 2
system calls limit the maximum size to
.Sy 64K .
This is the most compatible
style of extended attribute and is supported by all ZFS implementations.
.Pp
System-attribute-based xattrs can be enabled by setting the value to
.Sy sa .
The key advantage of this type of xattr is improved performance.
Storing extended attributes as system attributes
significantly decreases the amount of disk I/O required.
Up to
.Sy 64K
of data may be stored per-file in the space reserved for system attributes.
If there is not enough space available for an extended attribute
then it will be automatically written as a directory-based xattr.
System-attribute-based extended attributes are not accessible
on platforms which do not support the
.Sy xattr Ns = Ns Sy sa
feature.
OpenZFS supports
.Sy xattr Ns = Ns Sy sa
on both
.Fx
and Linux.
.Pp
The use of system-attribute-based xattrs is strongly encouraged for users of
SELinux or POSIX ACLs.
Both of these features heavily rely on extended
attributes and benefit significantly from the reduced access time.
.Pp
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy xattr
and
.Sy noxattr
mount options.
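.Pp
For example
.Pq with a hypothetical dataset name ,
system-attribute-based extended attributes can be enabled with:
.Dl # Nm zfs Cm set Sy xattr Ns = Ns Sy sa Ar pool/fs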
.It Sy jailed Ns = Ns Sy off Ns | Ns Sy on
Controls whether the dataset is managed from a jail.
See
.Xr zfs-jail 8
for more information.
Jails are a
.Fx
-feature and are not relevant on other platforms.
-The default value is
-.Sy off .
-.It Sy zoned Ns = Ns Sy on Ns | Ns Sy off
+feature and this property is not available on other platforms.
+.It Sy zoned Ns = Ns Sy off Ns | Ns Sy on
Controls whether the dataset is managed from a non-global zone or namespace.
-The default value is
-.Sy off .
+See
+.Xr zfs-zone 8
+for more information.
+Zoning is a
+Linux
+feature and this property is not available on other platforms.
.El
.Pp
The following three properties cannot be changed after the file system is
created, and therefore, should be set when the file system is created.
If the properties are not set with the
.Nm zfs Cm create
or
.Nm zpool Cm create
commands, these properties are inherited from the parent dataset.
If the parent dataset lacks these properties due to having been created prior to
these features being supported, the new file system will have the default values
for these properties.
.Bl -tag -width ""
.It Xo
.Sy casesensitivity Ns = Ns Sy sensitive Ns | Ns
.Sy insensitive Ns | Ns Sy mixed
.Xc
Indicates whether the file name matching algorithm used by the file system
should be case-sensitive, case-insensitive, or allow a combination of both
styles of matching.
The default value for the
.Sy casesensitivity
property is
.Sy sensitive .
Traditionally,
.Ux
and POSIX file systems have case-sensitive file names.
.Pp
The
.Sy mixed
value for the
.Sy casesensitivity
property indicates that the file system can support requests for both
case-sensitive and case-insensitive matching behavior.
Currently, case-insensitive matching behavior on a file system that supports
mixed behavior is limited to the SMB server product.
For more information about the
.Sy mixed
value behavior, see the "ZFS Administration Guide".
.It Xo
.Sy normalization Ns = Ns Sy none Ns | Ns Sy formC Ns | Ns
.Sy formD Ns | Ns Sy formKC Ns | Ns Sy formKD
.Xc
Indicates whether the file system should perform a
.Sy unicode
normalization of file names whenever two file names are compared, and which
normalization algorithm should be used.
File names are always stored unmodified; names are normalized as part of any
comparison process.
If this property is set to a legal value other than
.Sy none ,
and the
.Sy utf8only
property was left unspecified, the
.Sy utf8only
property is automatically set to
.Sy on .
The default value of the
.Sy normalization
property is
.Sy none .
This property cannot be changed after the file system is created.
.It Sy utf8only Ns = Ns Sy on Ns | Ns Sy off
Indicates whether the file system should reject file names that include
characters that are not present in the
.Sy UTF-8
character code set.
If this property is explicitly set to
.Sy off ,
the normalization property must either not be explicitly set or be set to
.Sy none .
The default value for the
.Sy utf8only
property is
.Sy off .
This property cannot be changed after the file system is created.
.El
.Pp
The
.Sy casesensitivity ,
.Sy normalization ,
and
.Sy utf8only
properties are also new permissions that can be assigned to non-privileged users
by using the ZFS delegated administration feature.
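.Pp
Because these properties cannot be changed later, they are typically supplied
at creation time; for example
.Pq with a hypothetical dataset name :
.Dl # Nm zfs Cm create Fl o Sy casesensitivity Ns = Ns Sy mixed Fl o Sy normalization Ns = Ns Sy formD Ar pool/smbshare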
.
.Ss Temporary Mount Point Properties
When a file system is mounted, either through
.Xr mount 8
for legacy mounts or the
.Nm zfs Cm mount
command for normal file systems, its mount options are set according to its
properties.
The correlation between properties and mount options is as follows:
.Bl -tag -compact -offset Ds -width "rootcontext="
.It Sy atime
atime/noatime
.It Sy canmount
auto/noauto
.It Sy devices
dev/nodev
.It Sy exec
exec/noexec
.It Sy readonly
ro/rw
.It Sy relatime
relatime/norelatime
.It Sy setuid
suid/nosuid
.It Sy xattr
xattr/noxattr
.It Sy nbmand
mand/nomand
.It Sy context Ns =
context=
.It Sy fscontext Ns =
fscontext=
.It Sy defcontext Ns =
defcontext=
.It Sy rootcontext Ns =
rootcontext=
.El
.Pp
In addition, these options can be set on a per-mount basis using the
.Fl o
option, without affecting the property that is stored on disk.
The values specified on the command line override the values stored in the
dataset.
The
.Sy nosuid
option is an alias for
.Sy nodevices , Ns Sy nosetuid .
These properties are reported as
.Qq temporary
by the
.Nm zfs Cm get
command.
If the properties are changed while the dataset is mounted, the new setting
overrides any temporary settings.
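.Pp
For example
.Pq with a hypothetical dataset name ,
a file system can be mounted read-only without atime updates for the duration
of a single mount:
.Dl # Nm zfs Cm mount Fl o Sy ro Ns , Ns Sy noatime Ar pool/fs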
.
.Ss User Properties
In addition to the standard native properties, ZFS supports arbitrary user
properties.
User properties have no effect on ZFS behavior, but applications or
administrators can use them to annotate datasets
.Pq file systems, volumes, and snapshots .
.Pp
User property names must contain a colon
.Pq Qq Sy \&:
character to distinguish them from native properties.
They may contain lowercase letters, numbers, and the following punctuation
characters: colon
.Pq Qq Sy \&: ,
dash
.Pq Qq Sy - ,
period
.Pq Qq Sy \&. ,
and underscore
.Pq Qq Sy _ .
The expected convention is that the property name is divided into two portions
such as
.Ar module : Ns Ar property ,
but this namespace is not enforced by ZFS.
User property names can be at most 256 characters, and cannot begin with a dash
.Pq Qq Sy - .
.Pp
When making programmatic use of user properties, it is strongly suggested to use
a reversed DNS domain name for the
.Ar module
component of property names to reduce the chance that two
independently-developed packages use the same property name for different
purposes.
.Pp
The values of user properties are arbitrary strings, are always inherited, and
are never validated.
All of the commands that operate on properties
.Po Nm zfs Cm list ,
.Nm zfs Cm get ,
.Nm zfs Cm set ,
and so forth
.Pc
can be used to manipulate both native properties and user properties.
Use the
.Nm zfs Cm inherit
command to clear a user property.
If the property is not defined in any parent dataset, it is removed entirely.
Property values are limited to 8192 bytes.
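.Pp
As an illustrative sketch
.Pq the module name and dataset are hypothetical ,
following the reversed-DNS convention:
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm set Ar com.example:backup-target Ns = Ns Ar offsite pool/fs
.No # Nm zfs Cm get Ar com.example:backup-target pool/fs
.No # Nm zfs Cm inherit Ar com.example:backup-target pool/fs
.Ed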
diff --git a/sys/contrib/openzfs/man/man8/zfs-send.8 b/sys/contrib/openzfs/man/man8/zfs-send.8
index 8cc6ae6ad59b..ba604bf77855 100644
--- a/sys/contrib/openzfs/man/man8/zfs-send.8
+++ b/sys/contrib/openzfs/man/man8/zfs-send.8
@@ -1,718 +1,734 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd January 12, 2023
+.Dd July 27, 2023
.Dt ZFS-SEND 8
.Os
.
.Sh NAME
.Nm zfs-send
.Nd generate backup stream of ZFS dataset
.Sh SYNOPSIS
.Nm zfs
.Cm send
.Op Fl DLPVbcehnpsvw
.Op Fl R Op Fl X Ar dataset Ns Oo , Ns Ar dataset Oc Ns …
.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
.Ar snapshot
.Nm zfs
.Cm send
.Op Fl DLPVcensvw
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm zfs
.Cm send
.Fl -redact Ar redaction_bookmark
.Op Fl DLPVcenpv
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Ar snapshot
.Nm zfs
.Cm send
.Op Fl PVenv
.Fl t
.Ar receive_resume_token
.Nm zfs
.Cm send
.Op Fl PVnv
.Fl S Ar filesystem
.Nm zfs
.Cm redact
.Ar snapshot redaction_bookmark
.Ar redaction_snapshot Ns …
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm send
.Op Fl DLPVbcehnpsvw
.Op Fl R Op Fl X Ar dataset Ns Oo , Ns Ar dataset Oc Ns …
.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
.Ar snapshot
.Xc
Creates a stream representation of the second
.Ar snapshot ,
which is written to standard output.
The output can be redirected to a file or to a different system
.Po for example, using
.Xr ssh 1
.Pc .
By default, a full stream is generated.
.Bl -tag -width "-D"
.It Fl D , -dedup
Deduplicated send is no longer supported.
This flag is accepted for backwards compatibility, but a regular,
non-deduplicated stream will be generated.
.It Fl I Ar snapshot
Generate a stream package that sends all intermediary snapshots from the first
snapshot to the second snapshot.
For example,
.Fl I Em @a Em fs@d
is similar to
.Fl i Em @a Em fs@b Ns \&; Fl i Em @b Em fs@c Ns \&; Fl i Em @c Em fs@d .
The incremental source may be specified as with the
.Fl i
option.
.It Fl L , -large-block
Generate a stream which may contain blocks larger than 128 KiB.
This flag has no effect if the
.Sy large_blocks
pool feature is disabled, or if the
.Sy recordsize
property of this filesystem has never been set above 128 KiB.
The receiving system must have the
.Sy large_blocks
pool feature enabled as well.
See
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy large_blocks
feature.
.It Fl P , -parsable
Print machine-parsable verbose information about the stream package generated.
.It Fl R , -replicate
Generate a replication stream package, which will replicate the specified
file system, and all descendent file systems, up to the named snapshot.
When received, all properties, snapshots, descendent file systems, and clones
are preserved.
.Pp
If the
.Fl i
or
.Fl I
flags are used in conjunction with the
.Fl R
flag, an incremental replication stream is generated.
The current values of properties, and current snapshot and file system names are
set when the stream is received.
If the
.Fl F
flag is specified when this stream is received, snapshots and file systems that
do not exist on the sending side are destroyed.
If the
.Fl R
flag is used to send encrypted datasets, then
.Fl w
must also be specified.
.It Fl V , -proctitle
Set the process title to a per-second report of how much data has been sent.
.It Fl X , -exclude Ar dataset Ns Oo , Ns Ar dataset Oc Ns …
With
.Fl R ,
.Fl X
specifies a set of datasets (and, hence, their descendants),
to be excluded from the send stream.
The root dataset may not be excluded.
.Fl X Ar a Fl X Ar b
is equivalent to
.Fl X Ar a , Ns Ar b .
.It Fl e , -embed
Generate a more compact stream by using
.Sy WRITE_EMBEDDED
records for blocks which are stored more compactly on disk by the
.Sy embedded_data
pool feature.
This flag has no effect if the
.Sy embedded_data
feature is disabled.
The receiving system must have the
.Sy embedded_data
feature enabled.
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
Datasets that are sent with this flag may not be
received as an encrypted dataset, since encrypted datasets cannot use the
.Sy embedded_data
feature.
See
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy embedded_data
feature.
.It Fl b , -backup
Sends only received property values, whether or not they are overridden by local
settings, but only if the dataset has ever been received.
Use this option when you want
.Nm zfs Cm receive
to restore received properties backed up on the sent dataset and to avoid
sending local settings that may have nothing to do with the source dataset,
but only with how the data is backed up.
.It Fl c , -compressed
Generate a more compact stream by using compressed WRITE records for blocks
which are compressed on disk and in memory
.Po see the
.Sy compression
property for details
.Pc .
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
.Fl L
option is not supplied in conjunction with
.Fl c ,
then the data will be decompressed before sending so it can be split into
smaller block sizes.
Streams sent with
.Fl c
will not have their data recompressed on the receiver side using
.Fl o Sy compress Ns = Ar value .
The data will stay compressed as it was from the sender.
The new compression property will be set for future data.
Note that uncompressed data from the sender will still be compressed
on the receiver, unless you specify
.Fl o Sy compress Ns = Em off .
.It Fl w , -raw
For encrypted datasets, send data exactly as it exists on disk.
This allows backups to be taken even if encryption keys are not currently
loaded.
The backup may then be received on an untrusted machine since that machine will
not have the encryption keys to read the protected data or alter it without
being detected.
Upon being received, the dataset will have the same encryption
keys as it did on the send side, although the
.Sy keylocation
property will be defaulted to
.Sy prompt
if not otherwise provided.
For unencrypted datasets, this flag will be equivalent to
.Fl Lec .
Note that if you do not use this flag for sending encrypted datasets, data will
be sent unencrypted and may be re-encrypted with a different encryption key on
the receiving system, which will disable the ability to do a raw send to that
system for incrementals.
.It Fl h , -holds
Generate a stream package that includes any snapshot holds (created with the
.Nm zfs Cm hold
command), indicating to
.Nm zfs Cm receive
that the holds should be applied to the dataset on the receiving system.
.It Fl i Ar snapshot
Generate an incremental stream from the first
.Ar snapshot
.Pq the incremental source
to the second
.Ar snapshot
.Pq the incremental target .
The incremental source can be specified as the last component of the snapshot
name
.Po the
.Sy @
character and following
.Pc
and it is assumed to be from the same file system as the incremental target.
.Pp
If the destination is a clone, the source may be the origin snapshot, which must
be fully specified
.Po for example,
.Em pool/fs@origin ,
not just
.Em @origin
.Pc .
.It Fl n , -dryrun
Do a dry-run
.Pq Qq No-op
send.
Do not generate any actual send data.
This is useful in conjunction with the
.Fl v
or
.Fl P
flags to determine what data will be sent.
In this case, the verbose output will be written to standard output
.Po contrast with a non-dry-run, where the stream is written to standard output
and the verbose output goes to standard error
.Pc .
.It Fl p , -props
Include the dataset's properties in the stream.
This flag is implicit when
.Fl R
is specified.
The receiving system must also support this feature.
Sends of encrypted datasets must use
.Fl w
when using this flag.
.It Fl s , -skip-missing
Allows sending a replication stream even when there are snapshots missing in the
hierarchy.
When a snapshot is missing, instead of throwing an error and aborting the send,
a warning is printed to the standard error stream, and the dataset to which it
belongs and its descendents are skipped.
This flag can only be used in conjunction with
.Fl R .
.It Fl v , -verbose
Print verbose information about the stream package generated.
This information includes a per-second report of how much data has been sent.
+The same report can be requested by sending
+.Dv SIGINFO
+or
+.Dv SIGUSR1 ,
+regardless of
+.Fl v .
.Pp
The format of the stream is committed.
You will be able to receive your streams on future versions of ZFS.
.El
.It Xo
.Nm zfs
.Cm send
.Op Fl DLPVcenvw
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Xc
Generate a send stream, which may be of a filesystem, and may be incremental
from a bookmark.
If the destination is a filesystem or volume, the pool must be read-only, or the
filesystem must not be mounted.
When the stream generated from a filesystem or volume is received, the default
snapshot name will be
.Qq --head-- .
.Bl -tag -width "-D"
.It Fl D , -dedup
Deduplicated send is no longer supported.
This flag is accepted for backwards compatibility, but a regular,
non-deduplicated stream will be generated.
.It Fl L , -large-block
Generate a stream which may contain blocks larger than 128 KiB.
This flag has no effect if the
.Sy large_blocks
pool feature is disabled, or if the
.Sy recordsize
property of this filesystem has never been set above 128 KiB.
The receiving system must have the
.Sy large_blocks
pool feature enabled as well.
See
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy large_blocks
feature.
.It Fl P , -parsable
Print machine-parsable verbose information about the stream package generated.
.It Fl c , -compressed
Generate a more compact stream by using compressed WRITE records for blocks
which are compressed on disk and in memory
.Po see the
.Sy compression
property for details
.Pc .
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
.Fl L
option is not supplied in conjunction with
.Fl c ,
then the data will be decompressed before sending so it can be split into
smaller block sizes.
.It Fl w , -raw
For encrypted datasets, send data exactly as it exists on disk.
This allows backups to be taken even if encryption keys are not currently
loaded.
The backup may then be received on an untrusted machine since that machine will
not have the encryption keys to read the protected data or alter it without
being detected.
Upon being received, the dataset will have the same encryption
keys as it did on the send side, although the
.Sy keylocation
property will be defaulted to
.Sy prompt
if not otherwise provided.
For unencrypted datasets, this flag will be equivalent to
.Fl Lec .
Note that if you do not use this flag for sending encrypted datasets, data will
be sent unencrypted and may be re-encrypted with a different encryption key on
the receiving system, which will disable the ability to do a raw send to that
system for incrementals.
.It Fl e , -embed
Generate a more compact stream by using
.Sy WRITE_EMBEDDED
records for blocks which are stored more compactly on disk by the
.Sy embedded_data
pool feature.
This flag has no effect if the
.Sy embedded_data
feature is disabled.
The receiving system must have the
.Sy embedded_data
feature enabled.
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
Datasets that are sent with this flag may not be received as an encrypted
dataset,
since encrypted datasets cannot use the
.Sy embedded_data
feature.
See
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy embedded_data
feature.
.It Fl i Ar snapshot Ns | Ns Ar bookmark
Generate an incremental send stream.
The incremental source must be an earlier snapshot in the destination's history.
It will commonly be an earlier snapshot in the destination's file system, in
which case it can be specified as the last component of the name
.Po the
.Sy #
or
.Sy @
character and following
.Pc .
.Pp
If the incremental target is a clone, the incremental source can be the origin
snapshot, or an earlier snapshot in the origin's filesystem, or the origin's
origin, etc.
.It Fl n , -dryrun
Do a dry-run
.Pq Qq No-op
send.
Do not generate any actual send data.
This is useful in conjunction with the
.Fl v
or
.Fl P
flags to determine what data will be sent.
In this case, the verbose output will be written to standard output
.Po contrast with a non-dry-run, where the stream is written to standard output
and the verbose output goes to standard error
.Pc .
.It Fl v , -verbose
Print verbose information about the stream package generated.
This information includes a per-second report of how much data has been sent.
+The same report can be requested by sending
+.Dv SIGINFO
+or
+.Dv SIGUSR1 ,
+regardless of
+.Fl v .
.El
.It Xo
.Nm zfs
.Cm send
.Fl -redact Ar redaction_bookmark
.Op Fl DLPVcenpv
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Ar snapshot
.Xc
Generate a redacted send stream.
This send stream contains all blocks from the snapshot being sent that aren't
included in the redaction list contained in the bookmark specified by the
.Fl -redact
(or
.Fl d )
flag.
The resulting send stream is said to be redacted with respect to the snapshots
the bookmark specified by the
.Fl -redact No flag was created with .
The bookmark must have been created by running
.Nm zfs Cm redact
on the snapshot being sent.
.Pp
This feature can be used to allow clones of a filesystem to be made available on
a remote system, in cases where their parent need not (or must not) be
usable.
For example, if a filesystem contains sensitive data, and it has clones where
that sensitive data has been secured or replaced with dummy data, redacted sends
can be used to replicate the secured data without replicating the original
sensitive data, while still sharing all possible blocks.
A snapshot that has been redacted with respect to a set of snapshots will
contain all blocks referenced by at least one snapshot in the set, but will
omit any block that is referenced by no snapshot in the set.
In other words, if all snapshots in the set have modified a given block in the
parent, that block will not be sent; but if one or more snapshots have not
modified a block in the parent, they will still reference the parent's block, so
that block will be sent.
Note that only user data will be redacted.
.Pp
When the redacted send stream is received, a redacted snapshot is generated.
Due to the nature of redaction, a redacted dataset can only be used in the
following ways:
.Bl -enum -width "a."
.It
To receive, as a clone, an incremental send from the original snapshot to one
of the snapshots it was redacted with respect to.
In this case, the stream will produce a valid dataset when received because all
blocks that were redacted in the parent are guaranteed to be present in the
child's send stream.
This use case will produce a normal snapshot, which can be used just like other
snapshots.
.
.It
To receive an incremental send from the original snapshot to something
redacted with respect to a subset of the set of snapshots the initial snapshot
was redacted with respect to.
In this case, each block that was redacted in the original is still redacted
(redacting with respect to additional snapshots causes less data to be redacted
(because the snapshots define what is permitted, and everything else is
redacted)).
This use case will produce a new redacted snapshot.
.It
To receive an incremental send from a redaction bookmark of the original
snapshot that was created when redacting with respect to a subset of the set of
snapshots the initial snapshot was redacted with respect to.
A send stream from such a redaction bookmark will contain all of the blocks
necessary to fill in any redacted data, should it be needed, because the sending
system is aware of what blocks were originally redacted.
This will either produce a normal snapshot or a redacted one, depending on
whether the new send stream is redacted.
.It
To receive an incremental send from a redacted version of the initial
snapshot that is redacted with respect to a subset of the set of snapshots the
initial snapshot was redacted with respect to.
A send stream from a compatible redacted dataset will contain all of the blocks
necessary to fill in any redacted data.
This will either produce a normal snapshot or a redacted one, depending on
whether the new send stream is redacted.
.It
To receive a full send as a clone of the redacted snapshot.
Since the stream is a full send, it definitionally contains all the data needed
to create a new dataset.
This use case will either produce a normal snapshot or a redacted one, depending
on whether the full send stream was redacted.
.El
.Pp
These restrictions are detected and enforced by
.Nm zfs Cm receive ;
a redacted send stream will contain the list of snapshots that the stream is
redacted with respect to.
These are stored with the redacted snapshot, and are used to detect and
correctly handle the cases above.
Note that for technical reasons,
raw sends and redacted sends cannot be combined at this time.
.It Xo
.Nm zfs
.Cm send
.Op Fl PVenv
.Fl t
.Ar receive_resume_token
.Xc
Creates a send stream which resumes an interrupted receive.
The
.Ar receive_resume_token
is the value of this property on the filesystem or volume that was being
received into.
See the documentation for
.Nm zfs Cm receive Fl s
for more details.
.It Xo
.Nm zfs
.Cm send
.Op Fl PVnv
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Fl S
.Ar filesystem
.Xc
Generate a send stream from a dataset that has been partially received.
.Bl -tag -width "-L"
.It Fl S , -saved
This flag requires that the specified filesystem previously received a resumable
send that did not finish and was interrupted.
In such scenarios this flag
enables the user to send this partially received state.
Using this flag will always use the last fully received snapshot
as the incremental source if it exists.
.El
.It Xo
.Nm zfs
.Cm redact
.Ar snapshot redaction_bookmark
.Ar redaction_snapshot Ns …
.Xc
Generate a new redaction bookmark.
In addition to the typical bookmark information, a redaction bookmark contains
the list of redacted blocks and the list of redaction snapshots specified.
The redacted blocks are blocks in the snapshot which are not referenced by any
of the redaction snapshots.
These blocks are found by iterating over the metadata in each redaction snapshot
to determine what has been changed since the target snapshot.
Redaction is designed to support redacted zfs sends; see the entry for
.Nm zfs Cm send
for more information on the purpose of this operation.
If a redact operation fails partway through (due to an error or a system
failure), the redaction can be resumed by rerunning the same command.
.El
.Ss Redaction
ZFS has support for a limited version of data subsetting, in the form of
redaction.
Using the
.Nm zfs Cm redact
command, a
.Sy redaction bookmark
can be created that stores a list of blocks containing sensitive information.
When provided to
.Nm zfs Cm send ,
this causes a
.Sy redacted send
to occur.
Redacted sends omit the blocks containing sensitive information,
replacing them with REDACT records.
When these send streams are received, a
.Sy redacted dataset
is created.
A redacted dataset cannot be mounted by default, since it is incomplete.
It can be used to receive other send streams.
In this way datasets can be used for data backup and replication,
with all the benefits that zfs send and receive have to offer,
while protecting sensitive information from being
stored on less-trusted machines or services.
.Pp
For the purposes of redaction, there are two steps to the process:
a redact step, and a send/receive step.
First, a redaction bookmark is created.
This is done by providing the
.Nm zfs Cm redact
command with a parent snapshot, a bookmark to be created, and a number of
redaction snapshots.
These redaction snapshots must be descendants of the parent snapshot,
and they should modify data that is considered sensitive in some way.
Any blocks of data modified by all of the redaction snapshots will
be listed in the redaction bookmark, because it represents the truly sensitive
information.
When it comes to the send step, the send process will not send
the blocks listed in the redaction bookmark, instead replacing them with
REDACT records.
When received on the target system, this will create a
redacted dataset, missing the data that corresponds to the blocks in the
redaction bookmark on the sending system.
The incremental send streams from
the original parent to the redaction snapshots can then also be received on
the target system, and this will produce a complete snapshot that can be used
normally.
Incrementals from one snapshot on the parent filesystem and another
can also be done by sending from the redaction bookmark, rather than the
snapshots themselves.
.Pp
To make the purpose of the feature clearer, an example is provided.
Consider a zfs filesystem containing four files.
These files represent information for an online shopping service.
One file contains a list of usernames and passwords, another contains purchase
histories,
a third contains click tracking data, and a fourth contains user preferences.
The owner of this data wants to make it available for their development teams to
test against, and their market research teams to do analysis on.
The development teams need information about user preferences and the click
tracking data, while the market research teams need information about purchase
histories and user preferences.
Neither needs access to the usernames and passwords.
However, because all of this data is stored in one ZFS filesystem,
it must all be sent and received together.
In addition, the owner of the data
wants to take advantage of features like compression, checksumming, and
snapshots, so they do want to continue to use ZFS to store and transmit their
data.
Redaction can help them do so.
First, they would make two clones of a snapshot of the data on the source.
In one clone, they create the setup they want their market research team to see;
they delete the usernames and passwords file,
and overwrite the click tracking data with dummy information.
In another, they create the setup they want the development teams
to see, by replacing the passwords with fake information and replacing the
purchase histories with randomly generated ones.
They would then create a redaction bookmark on the parent snapshot,
using snapshots on the two clones as redaction snapshots.
The parent can then be sent, redacted, to the target
server where the research and development teams have access.
Finally, incremental sends from the parent snapshot to each of the clones can be
sent
to and received on the target server; these snapshots are identical to the
ones on the source, and are ready to be used, while the parent snapshot on the
target contains none of the username and password data present on the source,
because it was removed by the redacted send operation.
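.Pp
An illustrative sketch of that workflow, with hypothetical pool, dataset,
bookmark, and host names:
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm clone Ar pool/store@snap pool/store/research
.No # Nm zfs Cm clone Ar pool/store@snap pool/store/dev
# (sanitize the sensitive files in each clone, then snapshot them)
.No # Nm zfs Cm snapshot Ar pool/store/research@snap
.No # Nm zfs Cm snapshot Ar pool/store/dev@snap
.No # Nm zfs Cm redact Ar pool/store@snap book pool/store/research@snap pool/store/dev@snap
.No # Nm zfs Cm send Fl -redact Ar book pool/store@snap |
.No " " Nm ssh Ar host Nm zfs Cm receive Ar poolB/store
.No # Nm zfs Cm send Fl i Ar pool/store@snap pool/store/research@snap |
.No " " Nm ssh Ar host Nm zfs Cm receive Ar poolB/store/research
.Ed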
.
+.Sh SIGNALS
+See
+.Fl v .
+.
.Sh EXAMPLES
.\" These are, respectively, examples 12, 13 from zfs.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Remotely Replicating ZFS Data
The following commands send a full stream and then an incremental stream to a
remote machine, restoring them into
.Em poolB/received/fs@a
and
.Em poolB/received/fs@b ,
respectively.
.Em poolB
must contain the file system
.Em poolB/received ,
and must not initially contain
.Em poolB/received/fs .
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm send Ar pool/fs@a |
.No " " Nm ssh Ar host Nm zfs Cm receive Ar poolB/received/fs Ns @ Ns Ar a
.No # Nm zfs Cm send Fl i Ar a pool/fs@b |
.No " " Nm ssh Ar host Nm zfs Cm receive Ar poolB/received/fs
.Ed
.
.Ss Example 2 : No Using the Nm zfs Cm receive Fl d No Option
The following command sends a full stream of
.Ar poolA/fsA/fsB@snap
to a remote machine, receiving it into
.Ar poolB/received/fsA/fsB@snap .
The
.Ar fsA/fsB@snap
portion of the received snapshot's name is determined from the name of the sent
snapshot.
.Ar poolB
must contain the file system
.Ar poolB/received .
If
.Ar poolB/received/fsA
does not exist, it is created as an empty file system.
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm send Ar poolA/fsA/fsB@snap |
.No " " Nm ssh Ar host Nm zfs Cm receive Fl d Ar poolB/received
.Ed
.
.Sh SEE ALSO
.Xr zfs-bookmark 8 ,
.Xr zfs-receive 8 ,
.Xr zfs-redact 8 ,
.Xr zfs-snapshot 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-events.8 b/sys/contrib/openzfs/man/man8/zpool-events.8
index 341f902fe66e..e1436f6ded57 100644
--- a/sys/contrib/openzfs/man/man8/zpool-events.8
+++ b/sys/contrib/openzfs/man/man8/zpool-events.8
@@ -1,487 +1,466 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 27, 2021
+.Dd July 11, 2023
.Dt ZPOOL-EVENTS 8
.Os
.
.Sh NAME
.Nm zpool-events
.Nd list recent events generated by kernel
.Sh SYNOPSIS
.Nm zpool
.Cm events
.Op Fl vHf
.Op Ar pool
.Nm zpool
.Cm events
.Fl c
.
.Sh DESCRIPTION
Lists all recent events generated by the ZFS kernel modules.
These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
For more information about the subclasses and event payloads
that can be generated see
.Sx EVENTS
and the following sections.
.
.Sh OPTIONS
.Bl -tag -compact -width Ds
.It Fl c
Clear all previous events.
.It Fl f
Follow mode.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a
single tab instead of arbitrary space.
.It Fl v
Print the entire payload for each event.
.El
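.Pp
For example
.Pq with a hypothetical pool name ,
to follow the full payload of new events as they are generated:
.Dl # Nm zpool Cm events Fl vf Ar tank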
.
.Sh EVENTS
These are the different event subclasses.
The full event name would be
.Sy ereport.fs.zfs.\& Ns Em SUBCLASS ,
but only the last part is listed here.
.Pp
.Bl -tag -compact -width "vdev.bad_guid_sum"
.It Sy checksum
Issued when a checksum error has been detected.
.It Sy io
Issued when there is an I/O error in a vdev in the pool.
.It Sy data
Issued when there have been data errors in the pool.
.It Sy deadman
Issued when an I/O request is determined to be "hung"; this can be caused
by lost completion events due to flaky hardware or drivers.
See
.Sy zfs_deadman_failmode
in
.Xr zfs 4
for additional information regarding "hung" I/O detection and configuration.
.It Sy delay
Issued when a completed I/O request exceeds the maximum allowed time
specified by the
.Sy zio_slow_io_ms
module parameter.
This can be an indicator of problems with the underlying storage device.
The number of delay events is ratelimited by the
.Sy zfs_slow_io_events_per_second
module parameter.
.It Sy config
Issued every time a vdev change has been made to the pool.
.It Sy zpool
Issued when a pool cannot be imported.
.It Sy zpool.destroy
Issued when a pool is destroyed.
.It Sy zpool.export
Issued when a pool is exported.
.It Sy zpool.import
Issued when a pool is imported.
.It Sy zpool.reguid
Issued when a REGUID (a new unique identifier for the pool) has been
generated.
.It Sy vdev.unknown
Issued when the vdev is unknown.
For example, trying to clear device errors on a vdev that has failed or been
removed from the system/pool and is no longer available.
.It Sy vdev.open_failed
Issued when a vdev could not be opened (because it didn't exist for example).
.It Sy vdev.corrupt_data
Issued when corrupt data has been detected on a vdev.
.It Sy vdev.no_replicas
Issued when there are no more replicas to sustain the pool.
This would lead to the pool being
.Em DEGRADED .
.It Sy vdev.bad_guid_sum
Issued when a missing device in the pool has been detected.
.It Sy vdev.too_small
Issued when the system (kernel) has removed a device, and ZFS
notices that the device is no longer there.
This is usually followed by a
.Sy probe_failure
event.
.It Sy vdev.bad_label
Issued when the label is OK but invalid.
.It Sy vdev.bad_ashift
Issued when the ashift alignment requirement has increased.
.It Sy vdev.remove
Issued when a vdev is detached from a mirror (or a spare detached from a
vdev where it has been used to replace a failed drive; this only works if
the original drive has been re-added).
.It Sy vdev.clear
Issued when clearing device errors in a pool.
Such as running
.Nm zpool Cm clear
on a device in the pool.
.It Sy vdev.check
Issued when a check to see if a given vdev could be opened is started.
.It Sy vdev.spare
Issued when a spare has kicked in to replace a failed device.
.It Sy vdev.autoexpand
Issued when a vdev can be automatically expanded.
.It Sy io_failure
Issued when there is an I/O failure in a vdev in the pool.
.It Sy probe_failure
Issued when a probe fails on a vdev.
This would occur if a vdev has been removed from the system outside of ZFS
(such as when the kernel has removed the device).
.It Sy log_replay
Issued when the intent log cannot be replayed.
This can occur in the case of a missing or damaged log device.
.It Sy resilver.start
Issued when a resilver is started.
.It Sy resilver.finish
Issued when the running resilver has finished.
.It Sy scrub.start
Issued when a scrub is started on a pool.
.It Sy scrub.finish
Issued when a pool has finished scrubbing.
.It Sy scrub.abort
Issued when a scrub is aborted on a pool.
.It Sy scrub.resume
Issued when a scrub is resumed on a pool.
.It Sy scrub.paused
Issued when a scrub is paused on a pool.
.It Sy bootfs.vdev.attach
.El
.
.Sh PAYLOADS
This is the payload (data, information) that accompanies an
event.
.Pp
For
.Xr zed 8 ,
these are set to uppercase and prefixed with
.Sy ZEVENT_ .
.Pp
.Bl -tag -compact -width "vdev_cksum_errors"
.It Sy pool
Pool name.
.It Sy pool_failmode
Failmode -
.Sy wait ,
.Sy continue ,
or
.Sy panic .
See the
.Sy failmode
property in
.Xr zpoolprops 7
for more information.
.It Sy pool_guid
The GUID of the pool.
.It Sy pool_context
The load state for the pool (0=none, 1=open, 2=import, 3=tryimport, 4=recover,
5=error).
.It Sy vdev_guid
The GUID of the vdev in question (the vdev failing or operated upon with
.Nm zpool Cm clear ,
etc.).
.It Sy vdev_type
Type of vdev -
.Sy disk ,
.Sy file ,
.Sy mirror ,
etc.
See the
.Sy Virtual Devices
section of
.Xr zpoolconcepts 7
for more information on possible values.
.It Sy vdev_path
Full path of the vdev, including any
.Em -partX .
.It Sy vdev_devid
ID of vdev (if any).
.It Sy vdev_fru
Physical FRU location.
.It Sy vdev_state
State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to
open, 5=faulted, 6=degraded, 7=healthy).
.It Sy vdev_ashift
The ashift value of the vdev.
.It Sy vdev_complete_ts
The time the last I/O request completed for the specified vdev.
.It Sy vdev_delta_ts
The time since the last I/O request completed for the specified vdev.
.It Sy vdev_spare_paths
List of spares, including full path and any
.Em -partX .
.It Sy vdev_spare_guids
GUID(s) of spares.
.It Sy vdev_read_errors
The number of read errors that have been detected on the vdev.
.It Sy vdev_write_errors
The number of write errors that have been detected on the vdev.
.It Sy vdev_cksum_errors
The number of checksum errors that have been detected on the vdev.
.It Sy parent_guid
GUID of the vdev parent.
.It Sy parent_type
Type of parent.
See
.Sy vdev_type .
.It Sy parent_path
Path of the vdev parent (if any).
.It Sy parent_devid
ID of the vdev parent (if any).
.It Sy zio_objset
The object set number for a given I/O request.
.It Sy zio_object
The object number for a given I/O request.
.It Sy zio_level
The indirect level for the block.
Level 0 is the lowest level and includes data blocks.
Values > 0 indicate metadata blocks at the appropriate level.
.It Sy zio_blkid
The block ID for a given I/O request.
.It Sy zio_err
The error number for a failure when handling a given I/O request,
compatible with
.Xr errno 3
with the value of
.Sy EBADE
used to indicate a ZFS checksum error.
.It Sy zio_offset
The offset in bytes of where to write the I/O request for the specified vdev.
.It Sy zio_size
The size in bytes of the I/O request.
.It Sy zio_flags
The current flags describing how the I/O request should be handled.
See the
.Sy I/O FLAGS
section for the full list of I/O flags.
.It Sy zio_stage
The current stage of the I/O in the pipeline.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_pipeline
The valid pipeline stages for the I/O.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_delay
The time elapsed (in nanoseconds) waiting for the block layer to complete the
I/O request.
Unlike
.Sy zio_delta ,
this does not include any vdev queuing time and is
therefore solely a measure of the block layer performance.
.It Sy zio_timestamp
The time when a given I/O request was submitted.
.It Sy zio_delta
The time required to service a given I/O request.
.It Sy prev_state
The previous state of the vdev.
-.It Sy cksum_expected
-The expected checksum value for the block.
-.It Sy cksum_actual
-The actual checksum value for an errant block.
.It Sy cksum_algorithm
Checksum algorithm used.
See
.Xr zfsprops 7
for more information on the available checksum algorithms.
.It Sy cksum_byteswap
Whether or not the data is byteswapped.
.It Sy bad_ranges
.No [\& Ns Ar start , end )
pairs of corruption offsets.
Offsets are always aligned on a 64-bit boundary,
and can include some gaps of non-corruption.
(See
.Sy bad_ranges_min_gap )
.It Sy bad_ranges_min_gap
In order to bound the size of the
.Sy bad_ranges
array, gaps of non-corruption
less than or equal to
.Sy bad_ranges_min_gap
bytes have been merged with
adjacent corruption.
Always at least 8 bytes, since corruption is detected on a 64-bit word basis.
.It Sy bad_range_sets
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits in that range which were clear in the good data and set
in the bad data.
.It Sy bad_range_clears
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits for that range which were set in the good data and clear in
the bad data.
.It Sy bad_set_bits
If this field exists, it is an array of
.Pq Ar bad data No & ~( Ns Ar good data ) ;
that is, the bits set in the bad data which are cleared in the good data.
Each element corresponds to a byte whose offset is in a range in
.Sy bad_ranges ,
and the array is ordered by offset.
Thus, the first element is the first byte in the first
.Sy bad_ranges
range, and the last element is the last byte in the last
.Sy bad_ranges
range.
.It Sy bad_cleared_bits
Like
.Sy bad_set_bits ,
but contains
.Pq Ar good data No & ~( Ns Ar bad data ) ;
that is, the bits set in the good data which are cleared in the bad data.
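.Pp
As a hypothetical illustration, if a good data byte is
.Sy 0x0F
and the corresponding bad data byte is
.Sy 0x1F ,
then
.Sy bad_set_bits
records
.Sy 0x10
for that byte, and
.Sy bad_cleared_bits
records
.Sy 0x00 .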
-.It Sy bad_set_histogram
-If this field exists, it is an array of counters.
-Each entry counts bits set in a particular bit of a big-endian uint64 type.
-The first entry counts bits
-set in the high-order bit of the first byte, the 9th byte, etc, and the last
-entry counts bits set of the low-order bit of the 8th byte, the 16th byte, etc.
-This information is useful for observing a stuck bit in a parallel data path,
-such as IDE or parallel SCSI.
-.It Sy bad_cleared_histogram
-If this field exists, it is an array of counters.
-Each entry counts bit clears in a particular bit of a big-endian uint64 type.
-The first entry counts bits
-clears of the high-order bit of the first byte, the 9th byte, etc, and the
-last entry counts clears of the low-order bit of the 8th byte, the 16th byte,
-etc.
-This information is useful for observing a stuck bit in a parallel data
-path, such as IDE or parallel SCSI.
.El
.
.Sh I/O STAGES
The ZFS I/O pipeline is composed of various stages, which are defined below.
The individual stages are used to construct these basic I/O
operations: Read, Write, Free, Claim, and Ioctl.
These stages may be
set on an event to describe the life cycle of a given I/O request.
.Pp
.TS
tab(:);
l l l .
Stage:Bit Mask:Operations
_:_:_
ZIO_STAGE_OPEN:0x00000001:RWFCI
ZIO_STAGE_READ_BP_INIT:0x00000002:R----
ZIO_STAGE_WRITE_BP_INIT:0x00000004:-W---
ZIO_STAGE_FREE_BP_INIT:0x00000008:--F--
ZIO_STAGE_ISSUE_ASYNC:0x00000010:RWF--
ZIO_STAGE_WRITE_COMPRESS:0x00000020:-W---
ZIO_STAGE_ENCRYPT:0x00000040:-W---
ZIO_STAGE_CHECKSUM_GENERATE:0x00000080:-W---
ZIO_STAGE_NOP_WRITE:0x00000100:-W---
ZIO_STAGE_BRT_FREE:0x00000200:--F--
ZIO_STAGE_DDT_READ_START:0x00000400:R----
ZIO_STAGE_DDT_READ_DONE:0x00000800:R----
ZIO_STAGE_DDT_WRITE:0x00001000:-W---
ZIO_STAGE_DDT_FREE:0x00002000:--F--
ZIO_STAGE_GANG_ASSEMBLE:0x00004000:RWFC-
ZIO_STAGE_GANG_ISSUE:0x00008000:RWFC-
ZIO_STAGE_DVA_THROTTLE:0x00010000:-W---
ZIO_STAGE_DVA_ALLOCATE:0x00020000:-W---
ZIO_STAGE_DVA_FREE:0x00040000:--F--
ZIO_STAGE_DVA_CLAIM:0x00080000:---C-
ZIO_STAGE_READY:0x00100000:RWFCI
ZIO_STAGE_VDEV_IO_START:0x00200000:RW--I
ZIO_STAGE_VDEV_IO_DONE:0x00400000:RW--I
ZIO_STAGE_VDEV_IO_ASSESS:0x00800000:RW--I
ZIO_STAGE_CHECKSUM_VERIFY:0x01000000:R----
ZIO_STAGE_DONE:0x02000000:RWFCI
.TE
.
.Sh I/O FLAGS
Every I/O request in the pipeline contains a set of flags which describe its
function and are used to govern its behavior.
These flags will be set in an event as a
.Sy zio_flags
payload entry.
.Pp
.TS
tab(:);
l l .
Flag:Bit Mask
_:_
ZIO_FLAG_DONT_AGGREGATE:0x00000001
ZIO_FLAG_IO_REPAIR:0x00000002
ZIO_FLAG_SELF_HEAL:0x00000004
ZIO_FLAG_RESILVER:0x00000008
ZIO_FLAG_SCRUB:0x00000010
ZIO_FLAG_SCAN_THREAD:0x00000020
ZIO_FLAG_PHYSICAL:0x00000040
ZIO_FLAG_CANFAIL:0x00000080
ZIO_FLAG_SPECULATIVE:0x00000100
ZIO_FLAG_CONFIG_WRITER:0x00000200
ZIO_FLAG_DONT_RETRY:0x00000400
ZIO_FLAG_NODATA:0x00001000
ZIO_FLAG_INDUCE_DAMAGE:0x00002000
ZIO_FLAG_IO_ALLOCATING:0x00004000
ZIO_FLAG_IO_RETRY:0x00008000
ZIO_FLAG_PROBE:0x00010000
ZIO_FLAG_TRYHARD:0x00020000
ZIO_FLAG_OPTIONAL:0x00040000
ZIO_FLAG_DONT_QUEUE:0x00080000
ZIO_FLAG_DONT_PROPAGATE:0x00100000
ZIO_FLAG_IO_BYPASS:0x00200000
ZIO_FLAG_IO_REWRITE:0x00400000
ZIO_FLAG_RAW_COMPRESS:0x00800000
ZIO_FLAG_RAW_ENCRYPT:0x01000000
ZIO_FLAG_GANG_CHILD:0x02000000
ZIO_FLAG_DDT_CHILD:0x04000000
ZIO_FLAG_GODFATHER:0x08000000
ZIO_FLAG_NOPWRITE:0x10000000
ZIO_FLAG_REEXECUTED:0x20000000
ZIO_FLAG_DELEGATED:0x40000000
ZIO_FLAG_FASTWRITE:0x80000000
.TE
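.Pp
As a hypothetical illustration, a
.Sy zio_flags
payload value of
.Sy 0x0000000a
is the bitwise OR of
.Sy ZIO_FLAG_IO_REPAIR
.Pq 0x00000002
and
.Sy ZIO_FLAG_RESILVER
.Pq 0x00000008 .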
.
.Sh SEE ALSO
.Xr zfs 4 ,
.Xr zed 8 ,
.Xr zpool-wait 8
diff --git a/sys/contrib/openzfs/man/man8/zpool.8 b/sys/contrib/openzfs/man/man8/zpool.8
index e8eadffa6fcf..4c4020bdd810 100644
--- a/sys/contrib/openzfs/man/man8/zpool.8
+++ b/sys/contrib/openzfs/man/man8/zpool.8
@@ -1,588 +1,596 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dt ZPOOL 8
.Os
.
.Sh NAME
.Nm zpool
.Nd configure ZFS storage pools
.Sh SYNOPSIS
.Nm
.Fl ?V
.Nm
.Cm version
.Nm
.Cm subcommand
.Op Ar arguments
.
.Sh DESCRIPTION
The
.Nm
command configures ZFS storage pools.
A storage pool is a collection of devices that provides physical storage and
data replication for ZFS datasets.
All datasets within a storage pool share the same space.
See
.Xr zfs 8
for information on managing datasets.
.Pp
For an overview of creating and managing ZFS storage pools see the
.Xr zpoolconcepts 7
manual page.
.
.Sh SUBCOMMANDS
All subcommands that modify state are logged persistently to the pool in their
original form.
.Pp
The
.Nm
command provides subcommands to create and destroy storage pools, add capacity
to storage pools, and provide information about the storage pools.
The following subcommands are supported:
.Bl -tag -width Ds
.It Xo
.Nm
.Fl ?\&
.Xc
Displays a help message.
.It Xo
.Nm
.Fl V , -version
.Xc
.It Xo
.Nm
.Cm version
.Xc
Displays the software version of the
.Nm
userland utility and the ZFS kernel module.
.El
.
.Ss Creation
.Bl -tag -width Ds
.It Xr zpool-create 8
Creates a new storage pool containing the virtual devices specified on the
command line.
.It Xr zpool-initialize 8
Begins initializing by writing to all unallocated regions on the specified
devices, or all eligible devices in the pool if no individual devices are
specified.
.El
.
.Ss Destruction
.Bl -tag -width Ds
.It Xr zpool-destroy 8
Destroys the given pool, freeing up any devices for other use.
.It Xr zpool-labelclear 8
Removes ZFS label information from the specified
.Ar device .
.El
.
.Ss Virtual Devices
.Bl -tag -width Ds
.It Xo
.Xr zpool-attach 8 Ns / Ns Xr zpool-detach 8
.Xc
-Increases or decreases redundancy by
-.Cm attach Ns ing or
-.Cm detach Ns ing a device on an existing vdev (virtual device).
+Converts a non-redundant disk into a mirror, or increases
+the redundancy level of an existing mirror
+.Cm ( attach Ns ), or performs the inverse operation (
+.Cm detach Ns ).
.It Xo
.Xr zpool-add 8 Ns / Ns Xr zpool-remove 8
.Xc
Adds the specified virtual devices to the given pool,
or removes the specified device from the pool.
.It Xr zpool-replace 8
Replaces an existing device (which may be faulted) with a new one.
.It Xr zpool-split 8
Creates a new pool by splitting all mirrors in an existing pool (which decreases
its redundancy).
.El
.
.Ss Properties
Available pool properties are listed in the
.Xr zpoolprops 7
manual page.
.Bl -tag -width Ds
.It Xr zpool-list 8
Lists the given pools along with a health status and space usage.
.It Xo
.Xr zpool-get 8 Ns / Ns Xr zpool-set 8
.Xc
Retrieves the given list of properties
.Po
or all properties if
.Sy all
is used
.Pc
for the specified storage pool(s).
.El
.
.Ss Monitoring
.Bl -tag -width Ds
.It Xr zpool-status 8
Displays the detailed health status for the given pools.
.It Xr zpool-iostat 8
Displays logical I/O statistics for the given pools/vdevs.
Physical I/O operations may be observed via
.Xr iostat 1 .
.It Xr zpool-events 8
Lists all recent events generated by the ZFS kernel modules.
These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
That manual page also describes the subclasses and event payloads
that can be generated.
.It Xr zpool-history 8
Displays the command history of the specified pool(s) or all pools if no pool is
specified.
.El
.
.Ss Maintenance
.Bl -tag -width Ds
.It Xr zpool-scrub 8
Begins a scrub or resumes a paused scrub.
.It Xr zpool-checkpoint 8
Checkpoints the current state of
.Ar pool ,
which can be later restored by
.Nm zpool Cm import Fl -rewind-to-checkpoint .
.It Xr zpool-trim 8
Initiates an immediate on-demand TRIM operation for all of the free space in a
pool.
This operation informs the underlying storage devices of all blocks
in the pool which are no longer allocated and allows thinly provisioned
devices to reclaim the space.
.It Xr zpool-sync 8
This command forces all in-core dirty data to be written to the primary
pool storage and not the ZIL.
It will also update administrative information including quota reporting.
Without arguments,
.Nm zpool Cm sync
will sync all pools on the system.
Otherwise, it will sync only the specified pool(s).
.It Xr zpool-upgrade 8
Manages the on-disk format version of storage pools.
.It Xr zpool-wait 8
Waits until all background activity of the given types has ceased in the given
pool.
.El
.
.Ss Fault Resolution
.Bl -tag -width Ds
.It Xo
.Xr zpool-offline 8 Ns / Ns Xr zpool-online 8
.Xc
Takes the specified physical device offline or brings it online.
.It Xr zpool-resilver 8
Starts a resilver.
If an existing resilver is already running it will be restarted from the
beginning.
.It Xr zpool-reopen 8
Reopens all the vdevs associated with the pool.
.It Xr zpool-clear 8
Clears device errors in a pool.
.El
.
.Ss Import & Export
.Bl -tag -width Ds
.It Xr zpool-import 8
Makes disks containing ZFS storage pools available for use on the system.
.It Xr zpool-export 8
Exports the given pools from the system.
.It Xr zpool-reguid 8
Generates a new unique identifier for the pool.
.El
.
.Sh EXIT STATUS
The following exit values are returned:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 0
Successful completion.
.It Sy 1
An error occurred.
.It Sy 2
Invalid command line options were specified.
.El
.
.Sh EXAMPLES
-.\" Examples 1, 2, 3, 4, 11, 12 are shared with zpool-create.8.
-.\" Examples 5, 13 are shared with zpool-add.8.
-.\" Examples 6, 15 are shared with zpool-list.8.
-.\" Examples 7 are shared with zpool-destroy.8.
-.\" Examples 8 are shared with zpool-export.8.
-.\" Examples 9 are shared with zpool-import.8.
-.\" Examples 10 are shared with zpool-upgrade.8.
-.\" Examples 14 are shared with zpool-remove.8.
-.\" Examples 16 are shared with zpool-status.8.
-.\" Examples 13, 16 are also shared with zpool-iostat.8.
+.\" Examples 1, 2, 3, 4, 12, 13 are shared with zpool-create.8.
+.\" Examples 6, 14 are shared with zpool-add.8.
+.\" Examples 7, 16 are shared with zpool-list.8.
+.\" Examples 8 are shared with zpool-destroy.8.
+.\" Examples 9 are shared with zpool-export.8.
+.\" Examples 10 are shared with zpool-import.8.
+.\" Examples 11 are shared with zpool-upgrade.8.
+.\" Examples 15 are shared with zpool-remove.8.
+.\" Examples 17 are shared with zpool-status.8.
+.\" Examples 14, 17 are also shared with zpool-iostat.8.
.\" Make sure to update them omnidirectionally
.Ss Example 1 : No Creating a RAID-Z Storage Pool
The following command creates a pool with a single raidz root vdev that
consists of six disks:
.Dl # Nm zpool Cm create Ar tank Sy raidz Pa sda sdb sdc sdd sde sdf
.
.Ss Example 2 : No Creating a Mirrored Storage Pool
The following command creates a pool with two mirrors, where each mirror
contains two disks:
.Dl # Nm zpool Cm create Ar tank Sy mirror Pa sda sdb Sy mirror Pa sdc sdd
.
.Ss Example 3 : No Creating a ZFS Storage Pool by Using Partitions
The following command creates a non-redundant pool using two disk partitions:
.Dl # Nm zpool Cm create Ar tank Pa sda1 sdb2
.
.Ss Example 4 : No Creating a ZFS Storage Pool by Using Files
The following command creates a non-redundant pool using files.
While not recommended, a pool based on files can be useful for experimental
purposes.
.Dl # Nm zpool Cm create Ar tank Pa /path/to/file/a /path/to/file/b
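.Pp
A sparse backing file of the desired size can first be created with, for
example:
.Dl # Nm truncate Fl s Ar 1G Pa /path/to/file/a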
.
-.Ss Example 5 : No Adding a Mirror to a ZFS Storage Pool
+.Ss Example 5 : No Making a non-mirrored ZFS Storage Pool mirrored
+The following command converts an existing single device
+.Ar sda
+into a mirror by attaching a second device to it,
+.Ar sdb .
+.Dl # Nm zpool Cm attach Ar tank Pa sda sdb
+.
+.Ss Example 6 : No Adding a Mirror to a ZFS Storage Pool
The following command adds two mirrored disks to the pool
.Ar tank ,
assuming the pool is already made up of two-way mirrors.
The additional space is immediately available to any datasets within the pool.
.Dl # Nm zpool Cm add Ar tank Sy mirror Pa sda sdb
.
-.Ss Example 6 : No Listing Available ZFS Storage Pools
+.Ss Example 7 : No Listing Available ZFS Storage Pools
The following command lists all available pools on the system.
In this case, the pool
.Ar zion
is faulted due to a missing device.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list
NAME    SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
rpool  19.9G  8.43G  11.4G         -    33%    42%  1.00x  ONLINE   -
tank   61.5G  20.0G  41.5G         -    48%    32%  1.00x  ONLINE   -
zion       -      -      -         -      -      -      -  FAULTED  -
.Ed
.
-.Ss Example 7 : No Destroying a ZFS Storage Pool
+.Ss Example 8 : No Destroying a ZFS Storage Pool
The following command destroys the pool
.Ar tank
and any datasets contained within:
.Dl # Nm zpool Cm destroy Fl f Ar tank
.
-.Ss Example 8 : No Exporting a ZFS Storage Pool
+.Ss Example 9 : No Exporting a ZFS Storage Pool
The following command exports the devices in pool
.Ar tank
so that they can be relocated or later imported:
.Dl # Nm zpool Cm export Ar tank
.
-.Ss Example 9 : No Importing a ZFS Storage Pool
+.Ss Example 10 : No Importing a ZFS Storage Pool
The following command displays available pools, and then imports the pool
.Ar tank
for use on the system.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm import
   pool: tank
     id: 15451357997522795478
  state: ONLINE
 action: The pool can be imported using its name or numeric identifier.
 config:
        tank        ONLINE
          mirror    ONLINE
            sda     ONLINE
            sdb     ONLINE
.No # Nm zpool Cm import Ar tank
.Ed
.
-.Ss Example 10 : No Upgrading All ZFS Storage Pools to the Current Version
+.Ss Example 11 : No Upgrading All ZFS Storage Pools to the Current Version
The following command upgrades all ZFS storage pools to the current version of
the software:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm upgrade Fl a
This system is currently running ZFS version 2.
.Ed
.
-.Ss Example 11 : No Managing Hot Spares
+.Ss Example 12 : No Managing Hot Spares
The following command creates a new pool with an available hot spare:
.Dl # Nm zpool Cm create Ar tank Sy mirror Pa sda sdb Sy spare Pa sdc
.Pp
If one of the disks were to fail, the pool would be reduced to the degraded
state.
The failed device can be replaced using the following command:
.Dl # Nm zpool Cm replace Ar tank Pa sda sdd
.Pp
Once the data has been resilvered, the spare is automatically removed and is
made available for use should another device fail.
The hot spare can be permanently removed from the pool using the following
command:
.Dl # Nm zpool Cm remove Ar tank Pa sdc
.
-.Ss Example 12 : No Creating a ZFS Pool with Mirrored Separate Intent Logs
+.Ss Example 13 : No Creating a ZFS Pool with Mirrored Separate Intent Logs
The following command creates a ZFS storage pool consisting of two two-way
mirrors and mirrored log devices:
.Dl # Nm zpool Cm create Ar pool Sy mirror Pa sda sdb Sy mirror Pa sdc sdd Sy log mirror Pa sde sdf
.
-.Ss Example 13 : No Adding Cache Devices to a ZFS Pool
+.Ss Example 14 : No Adding Cache Devices to a ZFS Pool
The following command adds two disks for use as cache devices to a ZFS storage
pool:
.Dl # Nm zpool Cm add Ar pool Sy cache Pa sdc sdd
.Pp
Once added, the cache devices gradually fill with content from main memory.
Depending on the size of your cache devices, it could take over an hour for
them to fill.
Capacity and reads can be monitored using the
.Cm iostat
subcommand as follows:
.Dl # Nm zpool Cm iostat Fl v Ar pool 5
.
-.Ss Example 14 : No Removing a Mirrored top-level (Log or Data) Device
+.Ss Example 15 : No Removing a Mirrored top-level (Log or Data) Device
The following commands remove the mirrored log device
.Sy mirror-2
and mirrored top-level data device
.Sy mirror-1 .
.Pp
Given this configuration:
.Bd -literal -compact -offset Ds
  pool: tank
 state: ONLINE
 scrub: none requested
config:
        NAME        STATE     READ WRITE CKSUM
        tank        ONLINE       0     0     0
          mirror-0  ONLINE       0     0     0
            sda     ONLINE       0     0     0
            sdb     ONLINE       0     0     0
          mirror-1  ONLINE       0     0     0
            sdc     ONLINE       0     0     0
            sdd     ONLINE       0     0     0
        logs
          mirror-2  ONLINE       0     0     0
            sde     ONLINE       0     0     0
            sdf     ONLINE       0     0     0
.Ed
.Pp
The command to remove the mirrored log
.Ar mirror-2 No is :
.Dl # Nm zpool Cm remove Ar tank mirror-2
.Pp
The command to remove the mirrored data
.Ar mirror-1 No is :
.Dl # Nm zpool Cm remove Ar tank mirror-1
.
-.Ss Example 15 : No Displaying expanded space on a device
+.Ss Example 16 : No Displaying expanded space on a device
The following command displays the detailed information for the pool
.Ar data .
This pool is composed of a single raidz vdev where one of its devices
increased its capacity by 10 GiB.
In this example, the pool will not be able to utilize this extra capacity until
all the devices under the raidz vdev have been expanded.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list Fl v Ar data
NAME         SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
data        23.9G  14.6G  9.30G         -    48%    61%  1.00x  ONLINE  -
  raidz1    23.9G  14.6G  9.30G         -    48%
    sda         -      -      -         -      -
    sdb         -      -      -       10G      -
    sdc         -      -      -         -      -
.Ed
.
-.Ss Example 16 : No Adding output columns
+.Ss Example 17 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
NAME     STATE  READ WRITE CKSUM vendor  model        size
tank     ONLINE 0    0     0
mirror-0 ONLINE 0    0     0
U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Sh ENVIRONMENT VARIABLES
.Bl -tag -compact -width "ZPOOL_IMPORT_UDEV_TIMEOUT_MS"
.It Sy ZFS_ABORT
Cause
.Nm
to dump core on exit for the purposes of running
.Sy ::findleaks .
.It Sy ZFS_COLOR
Use ANSI color in
.Nm zpool Cm status
and
.Nm zpool Cm iostat
output.
.It Sy ZPOOL_IMPORT_PATH
The search path for devices or files to use with the pool.
This is a colon-separated list of directories in which
.Nm
looks for device nodes and files.
Similar to the
.Fl d
option in
.Nm zpool import .
.It Sy ZPOOL_IMPORT_UDEV_TIMEOUT_MS
The maximum time in milliseconds that
.Nm zpool import
will wait for an expected device to be available.
.It Sy ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE
If set, suppress warning about non-native vdev ashift in
.Nm zpool Cm status .
The value is not used, only the presence or absence of the variable matters.
.It Sy ZPOOL_VDEV_NAME_GUID
Cause
.Nm
subcommands to output vdev guids by default.
This behavior is identical to the
.Nm zpool Cm status Fl g
command line option.
.It Sy ZPOOL_VDEV_NAME_FOLLOW_LINKS
Cause
.Nm
subcommands to follow links for vdev names by default.
This behavior is identical to the
.Nm zpool Cm status Fl L
command line option.
.It Sy ZPOOL_VDEV_NAME_PATH
Cause
.Nm
subcommands to output full vdev path names by default.
This behavior is identical to the
.Nm zpool Cm status Fl P
command line option.
.It Sy ZFS_VDEV_DEVID_OPT_OUT
Older OpenZFS implementations had issues when attempting to display pool
config vdev names if a
.Sy devid
NVP value was present in the pool's config.
.Pp
For example, a pool that originated on the illumos platform would have a
.Sy devid
value in the config, and
.Nm zpool Cm status
would fail when listing the config.
This would also be true for future Linux-based pools.
.Pp
A pool can be stripped of any
.Sy devid
values on import, or prevented from adding them on
.Nm zpool Cm create
or
.Nm zpool Cm add
by setting
.Sy ZFS_VDEV_DEVID_OPT_OUT .
.It Sy ZPOOL_SCRIPTS_AS_ROOT
Allow a privileged user to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
Normally, only unprivileged users are allowed to run
.Fl c .
.It Sy ZPOOL_SCRIPTS_PATH
The search path for scripts when running
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
This is a colon-separated list of directories and overrides the default
.Pa ~/.zpool.d
and
.Pa /etc/zfs/zpool.d
search paths.
.It Sy ZPOOL_SCRIPTS_ENABLED
Allow a user to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
If
.Sy ZPOOL_SCRIPTS_ENABLED
is not set, it is assumed that the user is allowed to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
.\" Shared with zfs.8
.It Sy ZFS_MODULE_TIMEOUT
Time, in seconds, to wait for
.Pa /dev/zfs
to appear.
Defaults to
.Sy 10 ,
max
.Sy 600 Pq 10 minutes .
If
.Pf < Sy 0 ,
wait forever; if
.Sy 0 ,
don't wait.
.El
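.Pp
For illustration, these variables are usually set for a single invocation.
The script name and search path below are placeholders:
.Bd -literal -compact -offset Ds
# ZPOOL_VDEV_NAME_PATH=1 zpool status
# ZPOOL_SCRIPTS_PATH=/usr/local/zpool.d zpool iostat -c myscript
.Ed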
.
.Sh INTERFACE STABILITY
.Sy Evolving
.
.Sh SEE ALSO
.Xr zfs 4 ,
.Xr zpool-features 7 ,
.Xr zpoolconcepts 7 ,
.Xr zpoolprops 7 ,
.Xr zed 8 ,
.Xr zfs 8 ,
.Xr zpool-add 8 ,
.Xr zpool-attach 8 ,
.Xr zpool-checkpoint 8 ,
.Xr zpool-clear 8 ,
.Xr zpool-create 8 ,
.Xr zpool-destroy 8 ,
.Xr zpool-detach 8 ,
.Xr zpool-events 8 ,
.Xr zpool-export 8 ,
.Xr zpool-get 8 ,
.Xr zpool-history 8 ,
.Xr zpool-import 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-labelclear 8 ,
.Xr zpool-list 8 ,
.Xr zpool-offline 8 ,
.Xr zpool-online 8 ,
.Xr zpool-reguid 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-replace 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-set 8 ,
.Xr zpool-split 8 ,
.Xr zpool-status 8 ,
.Xr zpool-sync 8 ,
.Xr zpool-trim 8 ,
.Xr zpool-upgrade 8 ,
.Xr zpool-wait 8
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index 485331ac655e..c132171592a8 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -1,497 +1,498 @@
# When integrated into a monolithic kernel the spl module must appear
# first. This ensures its module initialization function is run before
# any of the other module initialization functions which depend on it.
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
icp_include = @abs_srcdir@/icp/include
zstd_include = @abs_srcdir@/zstd/include
ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h
ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include
src = @abs_srcdir@
obj = @abs_builddir@
else
zfs_include = $(srctree)/include/zfs
icp_include = $(srctree)/$(src)/icp/include
zstd_include = $(srctree)/$(src)/zstd/include
ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h
endif
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs
ZFS_MODULE_CFLAGS += -I$(zfs_include)
ZFS_MODULE_CPPFLAGS += -D_KERNEL
ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@
# KASAN enables -Werror=frame-larger-than=1024, which
# breaks oh so many parts of our build.
ifeq ($(CONFIG_KASAN),y)
ZFS_MODULE_CFLAGS += -Wno-error=frame-larger-than=
endif
# Generated binary search code is particularly bad with this optimization.
# Oddly, range_tree.c is not affected when unrolling is not done and dsl_scan.c
# is not affected when unrolling is done.
# Disable it until the following upstream issue is resolved:
# https://github.com/llvm/llvm-project/issues/62790
ifeq ($(CONFIG_X86),y)
ifeq ($(CONFIG_CC_IS_CLANG),y)
CFLAGS_zfs/dsl_scan.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/metaslab.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/range_tree.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/zap_micro.o += -mllvm -x86-cmov-converter=false
endif
endif
ifneq ($(KBUILD_EXTMOD),)
@CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include
@CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@
endif
asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ifeq ($(CONFIG_ARM64),y)
CFLAGS_REMOVE_zcommon/zfs_fletcher_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neonx2.o += -mgeneral-regs-only
endif
# Suppress unused-value warnings in sparc64 architecture headers
ccflags-$(CONFIG_SPARC64) += -Wno-unused-value
obj-$(CONFIG_ZFS) := spl.o zfs.o
SPL_OBJS := \
spl-atomic.o \
spl-condvar.o \
spl-cred.o \
spl-err.o \
spl-generic.o \
spl-kmem-cache.o \
spl-kmem.o \
spl-kstat.o \
spl-proc.o \
spl-procfs-list.o \
spl-taskq.o \
spl-thread.o \
spl-trace.o \
spl-tsd.o \
spl-vmem.o \
spl-xdr.o \
spl-zlib.o \
spl-zone.o
spl-objs += $(addprefix os/linux/spl/,$(SPL_OBJS))
zfs-objs += avl/avl.o
ICP_OBJS := \
algs/aes/aes_impl.o \
algs/aes/aes_impl_generic.o \
algs/aes/aes_modes.o \
algs/blake3/blake3.o \
algs/blake3/blake3_generic.o \
algs/blake3/blake3_impl.o \
algs/edonr/edonr.o \
algs/modes/cbc.o \
algs/modes/ccm.o \
algs/modes/ctr.o \
algs/modes/ecb.o \
algs/modes/gcm.o \
algs/modes/gcm_generic.o \
algs/modes/modes.o \
algs/sha2/sha2_generic.o \
algs/sha2/sha256_impl.o \
algs/sha2/sha512_impl.o \
algs/skein/skein.o \
algs/skein/skein_block.o \
algs/skein/skein_iv.o \
api/kcf_cipher.o \
api/kcf_ctxops.o \
api/kcf_mac.o \
core/kcf_callprov.o \
core/kcf_mech_tabs.o \
core/kcf_prov_lib.o \
core/kcf_prov_tabs.o \
core/kcf_sched.o \
illumos-crypto.o \
io/aes.o \
io/sha2_mod.o \
io/skein_mod.o \
spi/kcf_spi.o
ICP_OBJS_X86_64 := \
asm-x86_64/aes/aes_aesni.o \
asm-x86_64/aes/aes_amd64.o \
asm-x86_64/aes/aeskey.o \
asm-x86_64/blake3/blake3_avx2.o \
asm-x86_64/blake3/blake3_avx512.o \
asm-x86_64/blake3/blake3_sse2.o \
asm-x86_64/blake3/blake3_sse41.o \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
ICP_OBJS_X86 := \
algs/aes/aes_impl_aesni.o \
algs/aes/aes_impl_x86-64.o \
algs/modes/gcm_pclmulqdq.o
ICP_OBJS_ARM := \
asm-arm/sha2/sha256-armv7.o \
asm-arm/sha2/sha512-armv7.o
ICP_OBJS_ARM64 := \
asm-aarch64/blake3/b3_aarch64_sse2.o \
asm-aarch64/blake3/b3_aarch64_sse41.o \
asm-aarch64/sha2/sha256-armv8.o \
asm-aarch64/sha2/sha512-armv8.o
ICP_OBJS_PPC_PPC64 := \
asm-ppc64/blake3/b3_ppc64le_sse2.o \
asm-ppc64/blake3/b3_ppc64le_sse41.o \
asm-ppc64/sha2/sha256-p8.o \
asm-ppc64/sha2/sha512-p8.o \
asm-ppc64/sha2/sha256-ppc.o \
asm-ppc64/sha2/sha512-ppc.o
zfs-objs += $(addprefix icp/,$(ICP_OBJS))
zfs-$(CONFIG_X86) += $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_X86_64) += $(addprefix icp/,$(ICP_OBJS_X86_64))
zfs-$(CONFIG_ARM) += $(addprefix icp/,$(ICP_OBJS_ARM))
zfs-$(CONFIG_ARM64) += $(addprefix icp/,$(ICP_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : asflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
# Suppress objtool "return with modified stack frame" warnings.
OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y
# Suppress objtool "unsupported stack pointer realignment" warnings.
# See #6950 for the reasoning.
OBJECT_FILES_NON_STANDARD_sha256-x86_64.o := y
OBJECT_FILES_NON_STANDARD_sha512-x86_64.o := y
LUA_OBJS := \
lapi.o \
lauxlib.o \
lbaselib.o \
lcode.o \
lcompat.o \
lcorolib.o \
lctype.o \
ldebug.o \
ldo.o \
lfunc.o \
lgc.o \
llex.o \
lmem.o \
lobject.o \
lopcodes.o \
lparser.o \
lstate.o \
lstring.o \
lstrlib.o \
ltable.o \
ltablib.o \
ltm.o \
lvm.o \
lzio.o \
setjmp/setjmp.o
zfs-objs += $(addprefix lua/,$(LUA_OBJS))
NVPAIR_OBJS := \
fnvpair.o \
nvpair.o \
nvpair_alloc_fixed.o \
nvpair_alloc_spl.o
zfs-objs += $(addprefix nvpair/,$(NVPAIR_OBJS))
UNICODE_OBJS := \
u8_textprep.o \
uconv.o
zfs-objs += $(addprefix unicode/,$(UNICODE_OBJS))
ZCOMMON_OBJS := \
cityhash.o \
zfeature_common.o \
zfs_comutil.o \
zfs_deleg.o \
zfs_fletcher.o \
zfs_fletcher_superscalar.o \
zfs_fletcher_superscalar4.o \
zfs_namecheck.o \
zfs_prop.o \
zpool_prop.o \
zprop_common.o
ZCOMMON_OBJS_X86 := \
zfs_fletcher_avx512.o \
zfs_fletcher_intel.o \
zfs_fletcher_sse.o
ZCOMMON_OBJS_ARM64 := \
zfs_fletcher_aarch64_neon.o
zfs-objs += $(addprefix zcommon/,$(ZCOMMON_OBJS))
zfs-$(CONFIG_X86) += $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zcommon/,$(ZCOMMON_OBJS_ARM64))
# Zstd uses -O3 by default, so we should follow
ZFS_ZSTD_FLAGS := -O3
# -fno-tree-vectorize gets set for gcc in zstd/common/compiler.h
# Set it for other compilers, too.
ZFS_ZSTD_FLAGS += -fno-tree-vectorize
# SSE register return with SSE disabled if -march=znverX is passed
ZFS_ZSTD_FLAGS += -U__BMI__
# Quiet warnings about frame size due to unused code in unmodified zstd lib
ZFS_ZSTD_FLAGS += -Wframe-larger-than=20480
ZSTD_OBJS := \
zfs_zstd.o \
zstd_sparc.o
ZSTD_UPSTREAM_OBJS := \
lib/common/entropy_common.o \
lib/common/error_private.o \
lib/common/fse_decompress.o \
lib/common/pool.o \
lib/common/zstd_common.o \
lib/compress/fse_compress.o \
lib/compress/hist.o \
lib/compress/huf_compress.o \
lib/compress/zstd_compress.o \
lib/compress/zstd_compress_literals.o \
lib/compress/zstd_compress_sequences.o \
lib/compress/zstd_compress_superblock.o \
lib/compress/zstd_double_fast.o \
lib/compress/zstd_fast.o \
lib/compress/zstd_lazy.o \
lib/compress/zstd_ldm.o \
lib/compress/zstd_opt.o \
lib/decompress/huf_decompress.o \
lib/decompress/zstd_ddict.o \
lib/decompress/zstd_decompress.o \
lib/decompress/zstd_decompress_block.o
zfs-objs += $(addprefix zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS))
# Disable aarch64 neon SIMD instructions for kernel mode
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -I$(zstd_include) $(ZFS_ZSTD_FLAGS)
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : asflags-y += -I$(zstd_include)
$(addprefix $(obj)/zstd/,$(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -include $(zstd_include)/aarch64_compat.h -include $(zstd_include)/zstd_compat_wrapper.h -Wp,-w
$(obj)/zstd/zfs_zstd.o : ccflags-y += -include $(zstd_include)/zstd_compat_wrapper.h
ZFS_OBJS := \
abd.o \
aggsum.o \
arc.o \
blake3_zfs.o \
blkptr.o \
bplist.o \
bpobj.o \
bptree.o \
bqueue.o \
brt.o \
btree.o \
dataset_kstats.o \
dbuf.o \
dbuf_stats.o \
ddt.o \
ddt_zap.o \
dmu.o \
dmu_diff.o \
dmu_object.o \
dmu_objset.o \
dmu_recv.o \
dmu_redact.o \
dmu_send.o \
dmu_traverse.o \
dmu_tx.o \
dmu_zfetch.o \
dnode.o \
dnode_sync.o \
dsl_bookmark.o \
dsl_crypt.o \
dsl_dataset.o \
dsl_deadlist.o \
dsl_deleg.o \
dsl_destroy.o \
dsl_dir.o \
dsl_pool.o \
dsl_prop.o \
dsl_scan.o \
dsl_synctask.o \
dsl_userhold.o \
edonr_zfs.o \
fm.o \
gzip.o \
hkdf.o \
lz4.o \
lz4_zfs.o \
lzjb.o \
metaslab.o \
mmp.o \
multilist.o \
objlist.o \
pathname.o \
range_tree.o \
refcount.o \
rrwlock.o \
sa.o \
sha2_zfs.o \
skein_zfs.o \
spa.o \
spa_checkpoint.o \
spa_config.o \
spa_errlog.o \
spa_history.o \
spa_log_spacemap.o \
spa_misc.o \
spa_stats.o \
space_map.o \
space_reftree.o \
txg.o \
uberblock.o \
unique.o \
vdev.o \
vdev_draid.o \
vdev_draid_rand.o \
vdev_indirect.o \
vdev_indirect_births.o \
vdev_indirect_mapping.o \
vdev_initialize.o \
vdev_label.o \
vdev_mirror.o \
vdev_missing.o \
vdev_queue.o \
vdev_raidz.o \
vdev_raidz_math.o \
vdev_raidz_math_scalar.o \
vdev_rebuild.o \
vdev_removal.o \
vdev_root.o \
vdev_trim.o \
zap.o \
zap_leaf.o \
zap_micro.o \
zcp.o \
zcp_get.o \
zcp_global.o \
zcp_iter.o \
zcp_set.o \
zcp_synctask.o \
zfeature.o \
zfs_byteswap.o \
zfs_chksum.o \
zfs_fm.o \
zfs_fuid.o \
zfs_impl.o \
zfs_ioctl.o \
zfs_log.o \
zfs_onexit.o \
zfs_quota.o \
zfs_ratelimit.o \
zfs_replay.o \
zfs_rlock.o \
zfs_sa.o \
zfs_vnops.o \
zil.o \
zio.o \
zio_checksum.o \
zio_compress.o \
zio_inject.o \
zle.o \
zrlock.o \
zthr.o \
zvol.o
ZFS_OBJS_OS := \
abd_os.o \
arc_os.o \
mmp_os.o \
policy.o \
qat.o \
qat_compress.o \
qat_crypt.o \
spa_misc_os.o \
trace.o \
vdev_disk.o \
vdev_file.o \
zfs_acl.o \
zfs_ctldir.o \
zfs_debug.o \
zfs_dir.o \
zfs_file_os.o \
zfs_ioctl_os.o \
zfs_racct.o \
zfs_sysfs.o \
zfs_uio.o \
zfs_vfsops.o \
zfs_vnops_os.o \
zfs_znode.o \
zio_crypt.o \
zpl_ctldir.o \
zpl_export.o \
zpl_file.o \
+ zpl_file_range.o \
zpl_inode.o \
zpl_super.o \
zpl_xattr.o \
zvol_os.o
ZFS_OBJS_X86 := \
vdev_raidz_math_avx2.o \
vdev_raidz_math_avx512bw.o \
vdev_raidz_math_avx512f.o \
vdev_raidz_math_sse2.o \
vdev_raidz_math_ssse3.o
ZFS_OBJS_ARM64 := \
vdev_raidz_math_aarch64_neon.o \
vdev_raidz_math_aarch64_neonx2.o
ZFS_OBJS_PPC_PPC64 := \
vdev_raidz_math_powerpc_altivec.o
zfs-objs += $(addprefix zfs/,$(ZFS_OBJS)) $(addprefix os/linux/zfs/,$(ZFS_OBJS_OS))
zfs-$(CONFIG_X86) += $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zfs/,$(ZFS_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y
ifeq ($(CONFIG_ALTIVEC),y)
$(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec
endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 0d6d006781d6..f672deed34dd 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1,6438 +1,6440 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <security/mac/mac_framework.h>
#include <sys/vfs.h>
#include <sys/endian.h>
#include <sys/vm.h>
#include <sys/vnode.h>
#if __FreeBSD_version >= 1300102
#include <sys/smr.h>
#endif
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/atomic.h>
#include <sys/namei.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/kdb.h>
#include <sys/sysproto.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sched.h>
#include <sys/acl.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
#include <sys/zil.h>
#include <sys/zfs_vnops.h>
#include <sys/module.h>
#include <sys/sysent.h>
#include <sys/dmu_impl.h>
#include <sys/brt.h>
#include <sys/zfeature.h>
#include <vm/vm_object.h>
#include <sys/extattr.h>
#include <sys/priv.h>
#ifndef VN_OPEN_INVFS
#define VN_OPEN_INVFS 0x0
#endif
VFS_SMR_DECLARE;
#if __FreeBSD_version < 1300103
#define NDFREE_PNBUF(ndp) NDFREE((ndp), NDF_ONLY_PNBUF)
#endif
#if __FreeBSD_version >= 1300047
#define vm_page_wire_lock(pp)
#define vm_page_wire_unlock(pp)
#else
#define vm_page_wire_lock(pp) vm_page_lock(pp)
#define vm_page_wire_unlock(pp) vm_page_unlock(pp)
#endif
#ifdef DEBUG_VFS_LOCKS
#define VNCHECKREF(vp) \
VNASSERT((vp)->v_holdcnt > 0 && (vp)->v_usecount > 0, vp, \
("%s: wrong ref counts", __func__));
#else
#define VNCHECKREF(vp)
#endif
#if __FreeBSD_version >= 1400045
typedef uint64_t cookie_t;
#else
typedef ulong_t cookie_t;
#endif
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
* This is done in a race-free way using zfs_enter(zfsvfs).
* A zfs_exit(zfsvfs) is needed before all returns. Any znodes
* must be checked with zfs_verify_zp(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) VN_RELE() should always be the last thing except for zil_commit()
* (if necessary) and zfs_exit(). This is for 3 reasons:
* First, if it's the last reference, the vnode/znode
* can be freed, so the zp may point to freed memory. Second, the last
* reference will call zfs_zinactive(), which may induce a lot of work --
* pushing cached pages (which acquires range locks) and syncing out
* cached atime changes. Third, zfs_zinactive() may require a new tx,
* which could deadlock the system if you were already holding one.
* If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
* If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
* zfs_enter(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lookup(&dl, ...) // lock directory entry (may VN_HOLD())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
* zfs_exit(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* zil_commit(zilog, foid); // synchronous when necessary
* zfs_exit(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr)
{
(void) cr;
znode_t *zp = VTOZ(*vpp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & FAPPEND) == 0)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/* Keep a count of the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_inc_32(&zp->z_sync_cnt);
zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
{
(void) offset, (void) cr;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
/* Decrement the synchronous opens in the znode */
if ((flag & O_SYNC) && (count == 1))
atomic_dec_32(&zp->z_sync_cnt);
zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
int *rvalp)
{
(void) flag, (void) cred, (void) rvalp;
loff_t off;
int error;
switch (com) {
case _FIOFFS:
{
return (0);
}
/*
* The following two ioctls are used by bfu. Faking them out is
* necessary to avoid bfu errors.
*/
case _FIOGDIO:
case _FIOSDIO:
{
return (0);
}
case F_SEEK_DATA:
case F_SEEK_HOLE:
{
off = *(offset_t *)data;
/* offset parameter is in/out */
error = zfs_holey(VTOZ(vp), com, &off);
if (error)
return (error);
*(offset_t *)data = off;
return (0);
}
}
return (SET_ERROR(ENOTTY));
}
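/*
 * Illustrative userland sketch (not part of this file): the
 * F_SEEK_DATA/F_SEEK_HOLE cases above are what back lseek(2) with
 * SEEK_DATA/SEEK_HOLE, so zfs_holey() is how holes are located from
 * user space.  Standard POSIX/FreeBSD interfaces only; error handling
 * is trimmed for brevity.
 */
#if 0	/* example only, never compiled as part of this module */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	if (argc < 2)
		return (1);
	int fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return (1);
	/* First data byte at or after offset 0 ... */
	off_t data = lseek(fd, 0, SEEK_DATA);
	/* ... and the first hole at or after that data offset. */
	off_t hole = lseek(fd, data, SEEK_HOLE);
	printf("data at %jd, hole at %jd\n", (intmax_t)data, (intmax_t)hole);
	close(fd);
	return (0);
}
#endif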
static vm_page_t
page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
{
vm_object_t obj;
vm_page_t pp;
int64_t end;
/*
* At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
* aligned boundaries, if the range is not aligned. As a result a
* DEV_BSIZE subrange with partially dirty data may get marked as clean.
* It may happen that all DEV_BSIZE subranges are marked clean and thus
* the whole page would be considered clean despite having some
* dirty data.
* For this reason we should shrink the range to DEV_BSIZE aligned
* boundaries before calling vm_page_clear_dirty.
*/
end = rounddown2(off + nbytes, DEV_BSIZE);
off = roundup2(off, DEV_BSIZE);
nbytes = end - off;
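/*
 * Worked example (illustrative): with DEV_BSIZE 512, off 100 and
 * nbytes 1000, end = rounddown2(1100, 512) = 1024 and
 * off = roundup2(100, 512) = 512, so only the fully covered
 * subrange [512, 1024) is eligible for vm_page_clear_dirty().
 */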
obj = vp->v_object;
zfs_vmobject_assert_wlocked_12(obj);
#if __FreeBSD_version < 1300050
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
vm_page_sbusy(pp);
} else if (pp != NULL) {
ASSERT(!pp->valid);
pp = NULL;
}
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
break;
}
#else
vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
VM_ALLOC_IGN_SBUSY);
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
#endif
return (pp);
}
static void
page_unbusy(vm_page_t pp)
{
vm_page_sunbusy(pp);
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(pp->object);
#else
vm_object_pip_subtract(pp->object, 1);
#endif
}
#if __FreeBSD_version > 1300051
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t m;
obj = vp->v_object;
vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
VM_ALLOC_NOBUSY);
return (m);
}
#else
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t pp;
obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_wire_lock(pp);
vm_page_hold(pp);
vm_page_wire_unlock(pp);
} else
pp = NULL;
break;
}
return (pp);
}
#endif
static void
page_unhold(vm_page_t pp)
{
vm_page_wire_lock(pp);
#if __FreeBSD_version >= 1300035
vm_page_unwire(pp, PQ_ACTIVE);
#else
vm_page_unhold(pp);
#endif
vm_page_wire_unlock(pp);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
vm_object_t obj;
struct sf_buf *sf;
vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300041
vm_object_pip_add(obj, 1);
#endif
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unbusy(pp);
}
len -= nbytes;
off = 0;
}
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(obj);
#else
vm_object_pip_wakeupn(obj, 0);
#endif
zfs_vmobject_wunlock_12(obj);
}
/*
* A read with the UIO_NOCOPY flag means that sendfile(2) requests
* ZFS to populate a range of page cache pages with data.
*
* NOTE: this function could be optimized to pre-allocate
* all pages in advance, drain exclusive busy on all of them,
* map them into contiguous KVA region and populate them
* in one single dmu_read() call.
*/
int
mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
vm_page_t pp;
int64_t start;
caddr_t va;
int len = nbytes;
int error = 0;
ASSERT3U(zfs_uio_segflg(uio), ==, UIO_NOCOPY);
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
zfs_vmobject_wlock_12(obj);
for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
if (vm_page_none_valid(pp)) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
memset(va + bytes, 0, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
if (error == 0) {
vm_page_valid(pp);
vm_page_activate(pp);
vm_page_do_sunbusy(pp);
} else {
zfs_vmobject_wlock(obj);
if (!vm_page_wired(pp) && pp->valid == 0 &&
vm_page_busy_tryupgrade(pp))
vm_page_free(pp);
else
vm_page_sunbusy(pp);
zfs_vmobject_wunlock(obj);
}
#else
vm_page_do_sunbusy(pp);
vm_page_lock(pp);
if (error) {
if (pp->wire_count == 0 && pp->valid == 0 &&
!vm_page_busied(pp))
vm_page_free(pp);
} else {
pp->valid = VM_PAGE_BITS_ALL;
vm_page_activate(pp);
}
vm_page_unlock(pp);
#endif
} else {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(pp);
}
if (error)
break;
zfs_uio_advance(uio, bytes);
len -= bytes;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* else we fall back to the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
int off;
int error = 0;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
start = zfs_uio_offset(uio);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
if ((pp = page_hold(vp, start))) {
struct sf_buf *sf;
caddr_t va;
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = vn_io_fault_uiomove(va + off, bytes,
GET_UIO_STRUCT(uio));
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unhold(pp);
} else {
zfs_vmobject_wunlock_12(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
zfs_vmobject_wlock_12(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
{
int error = 0;
ssize_t resid;
error = vn_rdwr(UIO_WRITE, ZTOV(zp), __DECONST(void *, data), len, pos,
UIO_SYSSPACE, IO_SYNC, kcred, NOCRED, &resid, curthread);
if (error) {
return (SET_ERROR(error));
} else if (presid == NULL) {
if (resid != 0) {
error = SET_ERROR(EIO);
}
} else {
*presid = resid;
}
return (error);
}
void
zfs_zrele_async(znode_t *zp)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = ITOZSB(vp)->z_os;
VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
}
static int
zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
{
int error;
*vpp = arg;
error = vn_lock(*vpp, lkflags);
if (error != 0)
vrele(*vpp);
return (error);
}
static int
zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
{
znode_t *zdp = VTOZ(dvp);
zfsvfs_t *zfsvfs __unused = zdp->z_zfsvfs;
int error;
int ltype;
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(dvp, __func__);
if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
ASSERT3P(dvp, ==, vp);
vref(dvp);
ltype = lkflags & LK_TYPE_MASK;
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/*
* Relocking for the "." case could leave us with a
* reclaimed vnode.
*/
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
/*
* Note that in this case, dvp is the child vnode, and we
* are looking up the parent vnode - exactly reverse from
* normal operation. Unlocking dvp requires some rather
* tricky unlock/relock dance to prevent mp from being freed;
* use vn_vget_ino_gen() which takes care of all that.
*
* XXX Note that there is a time window when both vnodes are
* unlocked. It is possible, although highly unlikely, that
* during that window the parent-child relationship between
* the vnodes may change, for example, get reversed.
* In that case we would have a wrong lock order for the vnodes.
* All other filesystems seem to ignore this problem, so we
* do the same here.
* A potential solution could be implemented as follows:
* - using LK_NOWAIT when locking the second vnode and retrying
* if necessary
* - checking that the parent-child relationship still holds
* after locking both vnodes and retrying if it doesn't
*/
error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
return (error);
} else {
error = vn_lock(vp, lkflags);
if (error != 0)
vrele(vp);
return (error);
}
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held vnode reference for it.
*
* IN: dvp - vnode of directory to search.
* nm - name of entry to lookup.
* pnp - full pathname to lookup [UNUSED].
* flags - LOOKUP_XATTR set if looking for an attribute.
* rdir - root directory vnode [UNUSED].
* cr - credentials of caller.
* ct - caller context
*
* OUT: vpp - vnode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
static int
zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
struct componentname *cnp, int nameiop, cred_t *cr, int flags,
boolean_t cached)
{
znode_t *zdp = VTOZ(dvp);
znode_t *zp;
zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
#if __FreeBSD_version > 1300124
seqc_t dvp_seqc;
#endif
int error = 0;
/*
* Fast path lookup; however, we must skip the DNLC lookup
* for case folding or normalizing lookups because the
* DNLC code only stores the passed-in name. This means
* creating 'a' and removing 'A' on a case-insensitive
* file system would work, but the DNLC would still think 'a'
* exists and won't let you create it again on the next
* pass through the fast path.
*/
if (!(flags & LOOKUP_XATTR)) {
if (dvp->v_type != VDIR) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
}
DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp,
const char *, nm);
if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
return (error);
#if __FreeBSD_version > 1300124
dvp_seqc = vn_seqc_read_notmodify(dvp);
#endif
*vpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* If the xattr property is off, refuse the lookup request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(VTOZ(dvp), &zp, cr, flags))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
*vpp = ZTOV(zp);
/*
* Do we have permission to get into attribute directory?
*/
error = zfs_zaccess(zp, ACE_EXECUTE, 0, B_FALSE, cr, NULL);
if (error) {
vrele(ZTOV(zp));
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Check accessibility of directory if we're not coming in via
* VOP_CACHEDLOOKUP.
*/
if (!cached) {
#ifdef NOEXECCHECK
if ((cnp->cn_flags & NOEXECCHECK) != 0) {
cnp->cn_flags &= ~NOEXECCHECK;
} else
#endif
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
NULL))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
/*
* First handle the special cases.
*/
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* If we are a snapshot mounted under .zfs, return
* the vp for the snapshot directory.
*/
if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
struct componentname cn;
vnode_t *zfsctl_vp;
int ltype;
zfs_exit(zfsvfs, FTAG);
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK1(dvp);
error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
&zfsctl_vp);
if (error == 0) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = cnp->cn_nameiop;
cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
cn.cn_lkflags = cnp->cn_lkflags;
error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
vput(zfsctl_vp);
}
vn_lock(dvp, ltype | LK_RETRY);
return (error);
}
}
if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
zfs_exit(zfsvfs, FTAG);
if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
return (error);
}
/*
* The loop retries the lookup if the parent-child relationship
* changes during the dot-dot locking complexities.
*/
for (;;) {
uint64_t parent;
error = zfs_dirlook(zdp, nm, &zp);
if (error == 0)
*vpp = ZTOV(zp);
zfs_exit(zfsvfs, FTAG);
if (error != 0)
break;
error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
if (error != 0) {
/*
* If we've got a locking error, then the vnode
* got reclaimed because of a force unmount.
* We never enter doomed vnodes into the name cache.
*/
*vpp = NULL;
return (error);
}
if ((cnp->cn_flags & ISDOTDOT) == 0)
break;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0) {
vput(ZTOV(zp));
*vpp = NULL;
return (error);
}
if (zdp->z_sa_hdl == NULL) {
error = SET_ERROR(EIO);
} else {
error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent));
}
if (error != 0) {
zfs_exit(zfsvfs, FTAG);
vput(ZTOV(zp));
break;
}
if (zp->z_id == parent) {
zfs_exit(zfsvfs, FTAG);
break;
}
vput(ZTOV(zp));
}
if (error != 0)
*vpp = NULL;
/* Translate errors and add SAVENAME when needed. */
if (cnp->cn_flags & ISLASTCN) {
switch (nameiop) {
case CREATE:
case RENAME:
if (error == ENOENT) {
error = EJUSTRETURN;
#if __FreeBSD_version < 1400068
cnp->cn_flags |= SAVENAME;
#endif
break;
}
zfs_fallthrough;
case DELETE:
#if __FreeBSD_version < 1400068
if (error == 0)
cnp->cn_flags |= SAVENAME;
#endif
break;
}
}
#if __FreeBSD_version > 1300124
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* FIXME: zfs_lookup_lock relocks vnodes and does nothing to
* handle races. In particular different callers may end up
* with different vnodes and will try to add conflicting
* entries to the namecache.
*
* While finding different result may be acceptable in face
* of concurrent modification, adding conflicting entries
* trips over an assert in the namecache.
*
* Ultimately let an entry through once everything settles.
*/
if (!vn_seqc_consistent(dvp, dvp_seqc)) {
cnp->cn_flags &= ~MAKEENTRY;
}
}
#endif
/* Insert name into cache (as non-existent) if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(dvp, NULL, cnp);
/* Insert name into cache if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == 0 && (cnp->cn_flags & MAKEENTRY)) {
if (!(cnp->cn_flags & ISLASTCN) ||
(nameiop != DELETE && nameiop != RENAME)) {
cache_enter(dvp, *vpp, cnp);
}
}
return (error);
}
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the vp of the created or trunc'd file.
*
* IN: dvp - vnode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - large file flag [UNUSED].
* ct - caller context
* vsecp - ACL to be set
* mnt_ns - Unused on FreeBSD
*
* OUT: vpp - vnode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated if new entry created
* vp - ctime|mtime always, atime if new
*/
int
zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{
(void) excl, (void) mode, (void) flag;
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
objset_t *os;
dmu_tx_t *tx;
int error;
uid_t uid = crgetuid(cr);
gid_t gid = crgetgid(cr);
uint64_t projid = ZFS_DEFAULT_PROJID;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype;
#ifdef DEBUG_VFS_LOCKS
vnode_t *dvp = ZTOV(dzp);
#endif
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || (vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
*zpp = NULL;
if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
vap->va_mode &= ~S_ISVTX;
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
ASSERT3P(zp, ==, NULL);
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) &&
(vap->va_type != VREG)) {
error = SET_ERROR(EINVAL);
goto out;
}
if ((error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids, NULL)) != 0)
goto out;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
getnewvnode_reserve_();
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
out:
VNCHECKREF(dvp);
if (error == 0) {
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dvp - vnode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime
* vp - ctime (if nlink > 0)
*/
static int
zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t xattr_obj;
uint64_t obj = 0;
dmu_tx_t *tx;
boolean_t unlinked;
uint64_t txtype;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zp = VTOZ(vp);
if ((error = zfs_verify_zp(zp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
zilog = zfsvfs->z_log;
xattr_obj = 0;
xzp = NULL;
if ((error = zfs_zaccess_delete(dzp, zp, cr, NULL))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (vp->v_type == VDIR) {
error = SET_ERROR(EPERM);
goto out;
}
vnevent_remove(vp, dvp, name, ct);
obj = zp->z_id;
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
}
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the vnode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (xzp) {
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
zfs_unlinked_add(zp, tx);
vp->v_vflag |= VV_NOSYNC;
}
/* XXX check changes to linux vnops */
txtype = TX_REMOVE;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (xzp)
vrele(ZTOV(xzp));
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
static int
zfs_lookup_internal(znode_t *dzp, const char *name, vnode_t **vpp,
struct componentname *cnp, int nameiop)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
int error;
cnp->cn_nameptr = __DECONST(char *, name);
cnp->cn_namelen = strlen(name);
cnp->cn_nameiop = nameiop;
cnp->cn_flags = ISLASTCN;
#if __FreeBSD_version < 1400068
cnp->cn_flags |= SAVENAME;
#endif
cnp->cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
cnp->cn_cred = kcred;
#if __FreeBSD_version < 1400037
cnp->cn_thread = curthread;
#endif
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay) {
struct vop_lookup_args a;
a.a_gen.a_desc = &vop_lookup_desc;
a.a_dvp = ZTOV(dzp);
a.a_vpp = vpp;
a.a_cnp = cnp;
error = vfs_cache_lookup(&a);
} else {
error = zfs_lookup(ZTOV(dzp), name, vpp, cnp, nameiop, kcred, 0,
B_FALSE);
}
#ifdef ZFS_DEBUG
if (error) {
printf("got error %d on name %s on op %d\n", error, name,
nameiop);
kdb_backtrace();
}
#endif
return (error);
}
int
zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags)
{
vnode_t *vp;
int error;
struct componentname cn;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_remove_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Create a new directory and insert it into dvp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dvp - vnode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
* vsecp - ACL to be set
* mnt_ns - Unused on FreeBSD
*
* OUT: vpp - vnode of created directory.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
* vp - ctime|mtime|atime updated
*/
int
zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{
(void) flags, (void) vsecp;
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t txtype;
dmu_tx_t *tx;
int error;
uid_t uid = crgetuid(cr);
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
ASSERT3U(vap->va_type, ==, VDIR);
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure the file system is at the proper version.
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
((vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
NULL, &acl_ids, NULL)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST, which can cause some applications
* to fail.
*/
*zpp = NULL;
if ((error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW))) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (error);
}
ASSERT3P(zp, ==, NULL);
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr,
mnt_ns))) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
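/*
 * The vnode is reserved before the transaction is assigned:
 * allocating one later could recurse into the filesystem while
 * transaction resources are held.
 */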
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/*
* Now put new name in parent dir.
*/
(void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (0);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
{
cache_purge(dvp);
cache_purge(vp);
}
#endif
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dvp - vnode of directory to remove from.
* name - name of directory to be removed.
* cwd - vnode of current working directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
static int
zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
if ((error = zfs_verify_zp(zp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
zilog = zfsvfs->z_log;
if ((error = zfs_zaccess_delete(dzp, zp, cr, NULL))) {
goto out;
}
if (vp->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
}
vnevent_rmdir(vp, dvp, name, ct);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
zfs_log_remove(zilog, tx, txtype, dzp, name,
ZFS_NO_OBJECT, B_FALSE);
}
dmu_tx_commit(tx);
if (zfsvfs->z_use_namecache)
cache_vop_rmdir(dvp, vp);
out:
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags)
{
struct componentname cn;
vnode_t *vp;
int error;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_rmdir_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Read as many directory entries as will fit into the provided
* buffer from the given directory cursor position (specified in
* the uio structure).
*
* IN: vp - vnode of directory to read.
* uio - structure supplying read location, range info,
* and return buffer.
* cr - credentials of caller.
* ct - caller context
*
* OUT: uio - updated offset and range, buffer filled.
* eofp - set to true if end-of-file detected.
* ncookies - number of entries in cookies
* cookies - offsets to directory entries
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*
* Note that the low 4 bits of the cookies returned by the ZAP are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
static int
zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
int *ncookies, cookie_t **cookies)
{
znode_t *zp = VTOZ(vp);
iovec_t *iovp;
dirent64_t *odp;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
caddr_t outbuf;
size_t bufsize;
zap_cursor_t zc;
zap_attribute_t zap;
uint_t bytes_wanted;
uint64_t offset; /* must be unsigned; checks for < 1 */
uint64_t parent;
int local_eof;
int outcount;
int error;
uint8_t prefetch;
uint8_t type;
int ncooks;
cookie_t *cooks = NULL;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* If we are not given an eof variable,
* use a local one.
*/
if (eofp == NULL)
eofp = &local_eof;
/*
* Check for valid iov_len.
*/
if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Quit if the directory has been removed (POSIX).
*/
if ((*eofp = zp->z_unlinked) != 0) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Get space to change directory entries into fs independent format.
*/
iovp = GET_UIO_STRUCT(uio)->uio_iov;
bytes_wanted = iovp->iov_len;
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) {
bufsize = bytes_wanted;
outbuf = kmem_alloc(bufsize, KM_SLEEP);
odp = (struct dirent64 *)outbuf;
} else {
bufsize = bytes_wanted;
outbuf = NULL;
odp = (struct dirent64 *)iovp->iov_base;
}
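/*
 * If the caller asked for seek cookies (e.g. for the NFS server),
 * allocate one per potential entry, assuming worst-case
 * minimal-size entries.
 */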
if (ncookies != NULL) {
/*
* The minimum entry size is the dirent size plus 1 byte for the file name.
*/
ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) -
sizeof (((struct dirent *)NULL)->d_name) + 1);
cooks = malloc(ncooks * sizeof (*cooks), M_TEMP, M_WAITOK);
*cookies = cooks;
*ncookies = ncooks;
}
/*
* Transform to file-system independent format
*/
outcount = 0;
while (outcount < bytes_wanted) {
ino64_t objnum;
ushort_t reclen;
off64_t *next = NULL;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if ((*eofp = (error == ENOENT)) != 0)
break;
else
goto update;
}
if (zap.za_integer_length != 8 ||
zap.za_num_integers != 1) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
/*
* The entry's object type is packed into the upper bits of the
* ZAP value along with the object number; extract it for the
* dirent.  (macOS does the same here.)
*/
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
}
reclen = DIRENT64_RECLEN(strlen(zap.za_name));
/*
* Will this entry fit in the buffer?
*/
if (outcount + reclen > bufsize) {
/*
* Did we manage to fit anything in the buffer?
*/
if (!outcount) {
error = SET_ERROR(EINVAL);
goto update;
}
break;
}
/*
* Add normal entry:
*/
odp->d_ino = objnum;
odp->d_reclen = reclen;
odp->d_namlen = strlen(zap.za_name);
/* NOTE: d_off is the offset for the *next* entry. */
next = &odp->d_off;
strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
odp->d_type = type;
dirent_terminate(odp);
odp = (dirent64_t *)((intptr_t)odp + reclen);
outcount += reclen;
ASSERT3S(outcount, <=, bufsize);
/* Prefetch znode */
if (prefetch)
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
/* Fill the offset right after advancing the cursor. */
if (next != NULL)
*next = offset;
if (cooks != NULL) {
*cooks++ = offset;
ncooks--;
KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
}
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
/* Subtract unused cookies */
if (ncookies != NULL)
*ncookies -= ncooks;
if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) {
iovp->iov_base += outcount;
iovp->iov_len -= outcount;
zfs_uio_resid(uio) -= outcount;
} else if ((error =
zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) {
/*
* Reset the pointer.
*/
offset = zfs_uio_offset(uio);
}
update:
zap_cursor_fini(&zc);
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1)
kmem_free(outbuf, bufsize);
if (error == ENOENT)
error = 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_uio_setoffset(uio, offset);
zfs_exit(zfsvfs, FTAG);
if (error != 0 && cookies != NULL) {
free(*cookies, M_TEMP);
*cookies = NULL;
*ncookies = 0;
}
return (error);
}
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
*
* IN: vp - vnode of file.
* vap - va_mask identifies requested attributes.
* If AT_XVATTR set, then optional attrs are requested
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
*
* OUT: vap - attribute values.
*
* RETURN: 0 (always succeeds).
*/
static int
zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error = 0;
uint32_t blksize;
u_longlong_t nblocks;
uint64_t mtime[2], ctime[2], crtime[2], rdev;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap = NULL;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[4];
int count = 0;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
if (vp->v_type == VBLK || vp->v_type == VCHR)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* If the ACL is trivial, don't bother looking for ACE_READ_ATTRIBUTES.
* Also, if we are the owner, don't bother, since the owner should
* always be allowed to read the basic attributes of the file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
(vap->va_uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr, NULL))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
/*
* Return all attributes. It's cheaper to provide the answer
* than to determine whether we were asked the question.
*/
vap->va_type = IFTOVT(zp->z_mode);
vap->va_mode = zp->z_mode & ~S_IFMT;
vn_fsid(vp, vap);
vap->va_nodeid = zp->z_id;
vap->va_nlink = zp->z_links;
if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp) &&
zp->z_links < ZFS_LINK_MAX)
vap->va_nlink++;
vap->va_size = zp->z_size;
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
vap->va_gen = zp->z_gen;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
/*
* Add in any requested optional attributes and the create time.
* Also set the corresponding bits in the returned attribute bitmap.
*/
if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
xoap->xoa_archive =
((zp->z_pflags & ZFS_ARCHIVE) != 0);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
xoap->xoa_readonly =
((zp->z_pflags & ZFS_READONLY) != 0);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
xoap->xoa_system =
((zp->z_pflags & ZFS_SYSTEM) != 0);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
xoap->xoa_hidden =
((zp->z_pflags & ZFS_HIDDEN) != 0);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
xoap->xoa_nounlink =
((zp->z_pflags & ZFS_NOUNLINK) != 0);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
xoap->xoa_immutable =
((zp->z_pflags & ZFS_IMMUTABLE) != 0);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
xoap->xoa_appendonly =
((zp->z_pflags & ZFS_APPENDONLY) != 0);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
xoap->xoa_nodump =
((zp->z_pflags & ZFS_NODUMP) != 0);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
xoap->xoa_opaque =
((zp->z_pflags & ZFS_OPAQUE) != 0);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
xoap->xoa_av_quarantined =
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
xoap->xoa_av_modified =
((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
vp->v_type == VREG) {
zfs_sa_get_scanstamp(zp, xvap);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
xoap->xoa_generation = zp->z_gen;
XVA_SET_RTN(xvap, XAT_GEN);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
xoap->xoa_offline =
((zp->z_pflags & ZFS_OFFLINE) != 0);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
xoap->xoa_sparse =
((zp->z_pflags & ZFS_SPARSE) != 0);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
xoap->xoa_projinherit =
((zp->z_pflags & ZFS_PROJINHERIT) != 0);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
xoap->xoa_projid = zp->z_projid;
XVA_SET_RTN(xvap, XAT_PROJID);
}
}
ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
ZFS_TIME_DECODE(&vap->va_mtime, mtime);
ZFS_TIME_DECODE(&vap->va_ctime, ctime);
ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
vap->va_blksize = blksize;
vap->va_bytes = nblocks << 9; /* nblocks * 512 */
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
vap->va_blksize = zfsvfs->z_max_blksz;
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If AT_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
* mnt_ns - Unused on FreeBSD
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - ctime updated, mtime updated if size changed.
*/
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
{
vnode_t *vp = ZTOV(zp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
uint64_t saved_mode;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
sa_bulk_attr_t bulk[7], xattr_bulk[7];
int count = 0, xattr_count = 0;
if (mask == 0)
return (0);
if (mask & AT_NOSET)
return (SET_ERROR(EINVAL));
if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (err);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
/*
* Make sure that if an ephemeral uid/gid or an xvattr is specified,
* the file system is at the proper version level.
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & AT_XVATTR))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (mask & AT_SIZE && vp->v_type == VDIR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* If this is an xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
xva_init(&tmpxvattr);
/*
* For immutable files, only the immutable bit and atime may be altered.
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* Note: ZFS_READONLY is handled in zfs_zaccess_common.
*/
/*
* Verify that the timestamps don't overflow 32 bits.
* ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times past 2038 (the 32-bit time_t limit).  This
* check should be removed
* once large timestamps are fully supported.
*/
if (mask & (AT_ATIME | AT_MTIME)) {
if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOVERFLOW));
}
}
if (xoap != NULL && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOVERFLOW));
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode)))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
}
attrzp = NULL;
aclp = NULL;
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
* First validate permissions
*/
if (mask & AT_SIZE) {
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
}
if (mask & (AT_ATIME|AT_MTIME) ||
((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr, mnt_ns);
}
if (mask & (AT_UID|AT_GID)) {
int idmask = (mask & (AT_UID|AT_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & AT_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & AT_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both AT_UID and AT_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr().
*/
if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
((idmask == AT_UID) && take_owner) ||
((idmask == AT_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr, mnt_ns) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
secpolicy_setid_clear(vap, vp, cr);
trim_mask = (mask & (AT_UID|AT_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & AT_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
* The bits will be restored prior to actually setting
* the attributes, so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(&tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((vp->v_type != VREG &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
if (mask & AT_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
mnt_ns) == 0) {
err = secpolicy_setid_setsticky_clear(vp, vap,
&oldva, cr);
if (err) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
trim_mask |= AT_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set, then either taking ownership has been
* granted or write_acl is present and the user has the ability
* to modify the mode.  In that case remove UID|GID and/or MODE
* from the mask so that secpolicy_vnode_setattr() doesn't
* revoke them.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
if (trim_mask & AT_MODE) {
/*
* Save the mode, as secpolicy_vnode_setattr()
* will overwrite it with ova.va_mode.
*/
saved_mode = vap->va_mode;
}
}
err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
if (trim_mask) {
vap->va_mask |= saved_mask;
if (trim_mask & AT_MODE) {
/*
* Recover the mode after
* secpolicy_vnode_setattr().
*/
vap->va_mode = saved_mode;
}
}
}
/*
* secpolicy_vnode_setattr() or the take-ownership path may have
* changed va_mask.
*/
mask = vap->va_mask;
if ((mask & (AT_UID | AT_GID)) || projid != ZFS_INVALID_PROJID) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
if (err == 0) {
err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
if (err != 0)
vrele(ZTOV(attrzp));
}
if (err)
goto out2;
}
if (mask & AT_UID) {
new_uid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_uid != zp->z_uid &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_uid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & AT_GID) {
new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &fuidp);
if (new_gid != zp->z_gid &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_gid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
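/*
 * All permission and quota checks have passed; assemble the
 * transaction that will apply the attribute changes.
 */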
tx = dmu_tx_create(os);
if (mask & AT_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = SET_ERROR(EPERM);
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & AT_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
* An existing object upgraded from an older system has no slot in
* its on-disk layout for the project ID attribute.  The quota
* accounting logic accesses the related slots directly by offset,
* so adjust old objects' layouts to place the project ID at a
* unified, fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&zp->z_acl_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&attrzp->z_acl_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (AT_UID|AT_GID)) {
if (mask & AT_UID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
zp->z_uid = new_uid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
attrzp->z_uid = new_uid;
}
}
if (mask & AT_GID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
zp->z_gid = new_gid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
attrzp->z_gid = new_gid;
}
}
if (!(mask & AT_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT0(err);
if (attrzp) {
vn_seqc_write_begin(ZTOV(attrzp));
err = zfs_acl_chown_setattr(attrzp);
vn_seqc_write_end(ZTOV(attrzp));
ASSERT0(err);
}
}
if (mask & AT_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if (mask & AT_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&zp->z_atime, sizeof (zp->z_atime));
}
if (mask & AT_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
if (mask & AT_SIZE && !(mask & AT_MTIME)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
} else if (mask != 0) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime);
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
mtime, ctime);
}
}
/*
* Do this after setting timestamps to prevent timestamp
* update from toggling bit
*/
if (xoap && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
xoap->xoa_createtime = vap->va_birthtime;
/*
* Restore the trimmed-off mask bits so that the return
* masks can be set for the caller.
*/
if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT3S(vp->v_type, ==, VREG);
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&attrzp->z_acl_lock);
}
out:
if (err == 0 && attrzp) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT0(err2);
}
if (attrzp)
vput(ZTOV(attrzp));
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
} else {
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (err);
}
/*
* Look up the directory entries corresponding to the source and target
* directory/name pairs.
*/
static int
zfs_rename_relock_lookup(znode_t *sdzp, const struct componentname *scnp,
znode_t **szpp, znode_t *tdzp, const struct componentname *tcnp,
znode_t **tzpp)
{
zfsvfs_t *zfsvfs;
znode_t *szp, *tzp;
int error;
/*
* Before using sdzp and tdzp we must ensure that they are live.
* As a porting legacy from illumos we have two things to worry
* about. One is typical for FreeBSD and it is that the vnode is
* not reclaimed (doomed). The other is that the znode is live.
* The current code can invalidate the znode without acquiring the
* corresponding vnode lock if the object represented by the znode
* and vnode is no longer valid after a rollback or receive operation.
* z_teardown_lock hidden behind zfs_enter and zfs_exit is the lock
* that protects the znodes from the invalidation.
*/
zfsvfs = sdzp->z_zfsvfs;
ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
if ((error = zfs_enter_verify_zp(zfsvfs, sdzp, FTAG)) != 0)
return (error);
if ((error = zfs_verify_zp(tdzp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Re-resolve svp to be certain it still exists and fetch the
* correct vnode.
*/
error = zfs_dirent_lookup(sdzp, scnp->cn_nameptr, &szp, ZEXISTS);
if (error != 0) {
/* Source entry invalid or not there. */
if ((scnp->cn_flags & ISDOTDOT) != 0 ||
(scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
error = SET_ERROR(EINVAL);
goto out;
}
*szpp = szp;
/*
* Re-resolve tvp, if it disappeared we just carry on.
*/
error = zfs_dirent_lookup(tdzp, tcnp->cn_nameptr, &tzp, 0);
if (error != 0) {
vrele(ZTOV(szp));
if ((tcnp->cn_flags & ISDOTDOT) != 0)
error = SET_ERROR(EINVAL);
goto out;
}
*tzpp = tzp;
out:
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* We acquire all but the sdvp lock using non-blocking acquisitions.  If we
* fail to acquire any lock in the path we will drop all held locks,
* acquire the new lock in a blocking fashion, and then release it and
* restart the rename.  This acquire/release step ensures that we do not
* spin on a lock waiting for release.  On error, release all vnode locks
* and decrement references the way tmpfs_rename() would do.
*/
static int
zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
struct vnode *tdvp, struct vnode **tvpp,
const struct componentname *scnp, const struct componentname *tcnp)
{
struct vnode *nvp, *svp, *tvp;
znode_t *sdzp, *tdzp, *szp, *tzp;
int error;
VOP_UNLOCK1(tdvp);
if (*tvpp != NULL && *tvpp != tdvp)
VOP_UNLOCK1(*tvpp);
relock:
error = vn_lock(sdvp, LK_EXCLUSIVE);
if (error)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
if (error != EBUSY)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE);
if (error)
goto out;
VOP_UNLOCK1(tdvp);
goto relock;
}
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
error = zfs_rename_relock_lookup(sdzp, scnp, &szp, tdzp, tcnp, &tzp);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
goto out;
}
svp = ZTOV(szp);
tvp = tzp != NULL ? ZTOV(tzp) : NULL;
/*
* Now try to acquire the locks on svp and tvp.
*/
nvp = svp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
if (tvp != NULL)
vrele(tvp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
VOP_UNLOCK1(nvp);
/*
* Concurrent rename race.
* XXX ?
*/
if (nvp == tdvp) {
vrele(nvp);
error = SET_ERROR(EINVAL);
goto out;
}
vrele(*svpp);
*svpp = nvp;
goto relock;
}
vrele(*svpp);
*svpp = nvp;
if (*tvpp != NULL)
vrele(*tvpp);
*tvpp = NULL;
if (tvp != NULL) {
nvp = tvp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK1(*svpp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
vput(nvp);
goto relock;
}
*tvpp = nvp;
}
return (0);
out:
return (error);
}
/*
* Walk up the tree from tdzp to verify that szp is not an ancestor
* of the target directory (which would create a cycle).  Note that
* we must use VRELE_ASYNC in this function: as it walks up the
* directory tree, vrele may need to acquire an exclusive lock if
* the last reference to a vnode is dropped.
*/
static int
zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
{
zfsvfs_t *zfsvfs;
znode_t *zp, *zp1;
uint64_t parent;
int error;
zfsvfs = tdzp->z_zfsvfs;
if (tdzp == szp)
return (SET_ERROR(EINVAL));
if (tdzp == sdzp)
return (0);
if (tdzp->z_id == zfsvfs->z_root)
return (0);
zp = tdzp;
for (;;) {
ASSERT(!zp->z_unlinked);
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
break;
if (parent == szp->z_id) {
error = SET_ERROR(EINVAL);
break;
}
if (parent == zfsvfs->z_root)
break;
if (parent == sdzp->z_id)
break;
error = zfs_zget(zfsvfs, parent, &zp1);
if (error != 0)
break;
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)));
zp = zp1;
}
if (error == ENOTDIR)
panic("checkpath: .. not a directory\n");
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
return (error);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
{
cache_purge(fvp);
if (tvp != NULL)
cache_purge(tvp);
cache_purge_negative(tdvp);
}
#endif
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr);
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdvp - Source directory containing the "old entry".
* scnp - Old entry name.
* tdvp - Target directory to contain the "new entry".
* tcnp - New entry name.
* cr - credentials of caller.
* INOUT: svpp - Source file
* tvpp - Target file, may point to NULL initially
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdvp,tdvp - ctime|mtime updated
*/
static int
zfs_do_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr)
{
int error;
ASSERT_VOP_ELOCKED(tdvp, __func__);
if (*tvpp != NULL)
ASSERT_VOP_ELOCKED(*tvpp, __func__);
/* Reject renames across filesystems. */
if ((*svpp)->v_mount != tdvp->v_mount ||
((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
error = SET_ERROR(EXDEV);
goto out;
}
if (zfsctl_is_node(tdvp)) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Lock all four vnodes to ensure safety and semantics of renaming.
*/
error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
if (error != 0) {
/* no vnodes are locked in the case of error here */
return (error);
}
error = zfs_do_rename_impl(sdvp, svpp, scnp, tdvp, tvpp, tcnp, cr);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(*svpp);
out:
if (*tvpp != NULL)
VOP_UNLOCK1(*tvpp);
if (tdvp != *tvpp)
VOP_UNLOCK1(tdvp);
return (error);
}
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs;
zilog_t *zilog;
znode_t *tdzp, *sdzp, *tzp, *szp;
const char *snm = scnp->cn_nameptr;
const char *tnm = tcnp->cn_nameptr;
int error;
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
zfsvfs = tdzp->z_zfsvfs;
if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
return (error);
if ((error = zfs_verify_zp(sdzp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
error = SET_ERROR(EILSEQ);
goto out;
}
/* If source and target are the same file, there is nothing to do. */
if ((*svpp) == (*tvpp)) {
error = 0;
goto out;
}
if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
(*tvpp)->v_mountedhere != NULL)) {
error = SET_ERROR(EXDEV);
goto out;
}
szp = VTOZ(*svpp);
if ((error = zfs_verify_zp(szp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
if (tzp != NULL) {
if ((error = zfs_verify_zp(tzp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
/*
* This is to prevent the creation of links into attribute space
* by renaming a linked file into or out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* If project inheritance is in use, i.e. the directory has
* ZFS_PROJINHERIT set, then its descendant directories inherit
* not only the project ID but also the ZFS_PROJINHERIT flag.  In
* that case we only allow renames into our tree when the project
* IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr, NULL)))
goto out;
if ((*svpp)->v_type == VDIR) {
/*
* Avoid ".", "..", and aliases of "." for obvious reasons.
*/
if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
sdzp == szp ||
(scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
error = EINVAL;
goto out;
}
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_check(szp, sdzp, tdzp)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if ((*svpp)->v_type == VDIR) {
if ((*tvpp)->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
} else {
cache_purge(tdvp);
if (sdvp != tdvp)
cache_purge(sdvp);
}
} else {
if ((*tvpp)->v_type == VDIR) {
error = SET_ERROR(EISDIR);
goto out;
}
}
}
vn_seqc_write_begin(*svpp);
vn_seqc_write_begin(sdvp);
if (*tvpp != NULL)
vn_seqc_write_begin(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_begin(tdvp);
vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
if (tzp)
vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
/*
* Notify the target directory if it is not the same
* as the source directory.
*/
if (tdvp != sdvp) {
vnevent_rename_dest_dir(tdvp, ct);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out_seq;
}
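/*
 * The rename proper: remove any existing target, create the new
 * name, mark the source AV-modified, and remove the old name; if
 * removing the old name fails, the new name is backed out.
 */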
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
if (error == 0) {
error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
snm, tdzp, tnm, szp);
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY0(zfs_link_destroy(tdzp, tnm, szp, tx,
ZRENAMING, NULL));
}
}
if (error == 0) {
cache_vop_rename(sdvp, *svpp, tdvp, *tvpp, scnp, tcnp);
}
}
dmu_tx_commit(tx);
out_seq:
vn_seqc_write_end(*svpp);
vn_seqc_write_end(sdvp);
if (*tvpp != NULL)
vn_seqc_write_end(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_end(tdvp);
out:
if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
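/*
 * Entry point for callers that have only directory znodes and names,
 * e.g. ZIL replay: look up both entries, then defer to zfs_do_rename()
 * for the actual locking and rename work.
 */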
int
zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zidmap_t *mnt_ns)
{
struct componentname scn, tcn;
vnode_t *sdvp, *tdvp;
vnode_t *svp, *tvp;
int error;
svp = tvp = NULL;
if (rflags != 0 || wo_vap != NULL)
return (SET_ERROR(EINVAL));
sdvp = ZTOV(sdzp);
tdvp = ZTOV(tdzp);
error = zfs_lookup_internal(sdzp, sname, &svp, &scn, DELETE);
if (sdzp->z_zfsvfs->z_replay == B_FALSE)
VOP_UNLOCK1(sdvp);
if (error != 0)
goto fail;
VOP_UNLOCK1(svp);
vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
error = zfs_lookup_internal(tdzp, tname, &tvp, &tcn, RENAME);
if (error == EJUSTRETURN)
tvp = NULL;
else if (error != 0) {
VOP_UNLOCK1(tdvp);
goto fail;
}
error = zfs_do_rename(sdvp, &svp, &scn, tdvp, &tvp, &tcn, cr);
fail:
if (svp != NULL)
vrele(svp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dvp - Directory to contain new symbolic link.
* link - Name for new symlink entry.
* vap - Attributes of new entry.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
* mnt_ns - Unused on FreeBSD
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
int
zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
const char *link, znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns)
{
(void) flags;
znode_t *zp;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
ASSERT3S(vap->va_type, ==, VLNK);
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (len > MAXPATHLEN) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids, NULL)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids,
0 /* projid */)) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create a new object for the symlink.
* For version 4 ZPL datasets the symlink will be stored as an SA attribute.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
__DECONST(void *, link), len, tx);
else
zfs_sa_symlink(zp, __DECONST(char *, link), len, tx);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
*zpp = zp;
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by vp.
*
* IN: vp - vnode of symbolic link.
* uio - structure to contain the link path.
* cr - credentials of caller.
* ct - caller context
*
* OUT: uio - structure containing the link path.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
static int
zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct)
{
(void) cr, (void) ct;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Insert a new entry into directory tdvp referencing svp.
*
* IN: tdvp - Directory to contain new entry.
* svp - vnode of new entry.
* name - name of new entry.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* tdvp - ctime|mtime updated
* svp - ctime updated
*/
int
zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
int flags)
{
(void) flags;
znode_t *tzp;
zfsvfs_t *zfsvfs = tdzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
uint64_t parent;
uid_t owner;
ASSERT3S(ZTOV(tdzp)->v_type, ==, VDIR);
if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (ZTOV(szp)->v_type == VDIR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((error = zfs_verify_zp(szp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* If project inheritance is in use, i.e. the directory has
* ZFS_PROJINHERIT set, then its descendant directories inherit
* not only the project ID but also the ZFS_PROJINHERIT flag.  In
* that case we only allow hard link creation in our tree when the
* project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
if (szp->z_pflags & (ZFS_APPENDONLY |
ZFS_IMMUTABLE | ZFS_READONLY)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
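/*
 * Only the file's owner may create a new link to it, unless the
 * caller holds the privilege checked by secpolicy_basic_link().
 */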
owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(ZTOV(szp), cr) != 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr, NULL))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(tdzp, name, &tzp, ZNEW);
if (error) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
error = zfs_link_create(tdzp, name, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
dmu_tx_commit(tx);
if (error == 0) {
vnevent_link(ZTOV(szp), ct);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: zp - znode of the file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* zp - ctime|mtime updated
*/
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
(void) offset;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (cmd != F_FREESP) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr, NULL))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
zfs_exit(zfsvfs, FTAG);
return (error);
}
static void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
(void) cr, (void) ct;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL) {
/*
* The fs has been unmounted, or we did a
* suspend/resume and this file no longer exists.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
if (zp->z_unlinked) {
/*
* Fast path to recycle a vnode of a removed file.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
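/*
 * Write back a dirty atime before the vnode goes inactive; if the
 * transaction cannot be assigned, the update is simply dropped.
 */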
if (zp->z_atime_dirty && zp->z_unlinked == 0) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&zp->z_atime, sizeof (zp->z_atime), tx);
zp->z_atime_dirty = 0;
dmu_tx_commit(tx);
}
}
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
}
_Static_assert(sizeof (struct zfid_short) <= sizeof (struct fid),
"struct zfid_short bigger than struct fid");
_Static_assert(sizeof (struct zfid_long) <= sizeof (struct fid),
"struct zfid_long bigger than struct fid");
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
(void) ct;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
gen = (uint32_t)gen64;
size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
fidp->fid_len = size;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
if (size == LONG_FID_LEN) {
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
zfid_long_t *zlfid;
zlfid = (zfid_long_t *)fidp;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
/* XXX - this should be the generation number for the objset */
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
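/*
 * Worked example of the encoding above (hypothetical values): for
 * object 0x1234 and generation 5, the loops store both fields
 * least-significant byte first, so zf_object[] begins { 0x34, 0x12,
 * 0x00, ... } and zf_gen[] begins { 0x05, 0x00, ... }.  A long FID
 * additionally appends the objset id in the same byte order, plus a
 * zeroed objset generation.
 */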
static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
caller_context_t *ct)
{
znode_t *zp;
zfsvfs_t *zfsvfs;
int error;
switch (cmd) {
case _PC_LINK_MAX:
*valp = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*valp = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*valp = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
#if 0 /* POSIX ACLs are not implemented for ZFS on FreeBSD yet. */
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
*valp = zfsvfs->z_acl_type == ZFSACLTYPE_POSIX ? 1 : 0;
zfs_exit(zfsvfs, FTAG);
#else
*valp = 0;
#endif
return (0);
case _PC_ACL_NFS4:
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
*valp = zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4 ? 1 : 0;
zfs_exit(zfsvfs, FTAG);
return (0);
case _PC_ACL_PATH_MAX:
*valp = ACL_MAX_ENTRIES;
return (0);
default:
return (EOPNOTSUPP);
}
}
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
vm_object_t object;
off_t start, end, obj_size;
uint_t blksz;
int pgsin_b, pgsin_a;
int error;
if (zfs_enter_verify_zp(zfsvfs, zp, FTAG) != 0)
return (zfs_vm_pagerret_error);
start = IDX_TO_OFF(ma[0]->pindex);
end = IDX_TO_OFF(ma[count - 1]->pindex + 1);
/*
* Lock a range covering all required and optional pages.
* Note that we need to handle the case of the block size growing.
*/
for (;;) {
blksz = zp->z_blksz;
lr = zfs_rangelock_tryenter(&zp->z_rangelock,
rounddown(start, blksz),
roundup(end, blksz) - rounddown(start, blksz), RL_READER);
if (lr == NULL) {
if (rahead != NULL) {
*rahead = 0;
rahead = NULL;
}
if (rbehind != NULL) {
*rbehind = 0;
rbehind = NULL;
}
break;
}
if (blksz == zp->z_blksz)
break;
zfs_rangelock_exit(lr);
}
object = ma[0]->object;
zfs_vmobject_wlock(object);
obj_size = object->un_pager.vnp.vnp_size;
zfs_vmobject_wunlock(object);
if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
if (lr != NULL)
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (zfs_vm_pagerret_bad);
}
pgsin_b = 0;
if (rbehind != NULL) {
pgsin_b = OFF_TO_IDX(start - rounddown(start, blksz));
pgsin_b = MIN(*rbehind, pgsin_b);
}
pgsin_a = 0;
if (rahead != NULL) {
pgsin_a = OFF_TO_IDX(roundup(end, blksz) - end);
if (end + IDX_TO_OFF(pgsin_a) >= obj_size)
pgsin_a = OFF_TO_IDX(round_page(obj_size) - end);
pgsin_a = MIN(*rahead, pgsin_a);
}
/*
* NB: we need to pass the exact byte size of the data that we expect
* to read after accounting for the file size. This is required because
* ZFS will panic if we request DMU to read beyond the end of the last
* allocated block.
*/
error = dmu_read_pages(zfsvfs->z_os, zp->z_id, ma, count, &pgsin_b,
&pgsin_a, MIN(end, obj_size) - (end - PAGE_SIZE));
if (lr != NULL)
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, count*PAGE_SIZE);
zfs_exit(zfsvfs, FTAG);
if (error != 0)
return (zfs_vm_pagerret_error);
VM_CNT_INC(v_vnodein);
VM_CNT_ADD(v_vnodepgsin, count + pgsin_b + pgsin_a);
if (rbehind != NULL)
*rbehind = pgsin_b;
if (rahead != NULL)
*rahead = pgsin_a;
return (zfs_vm_pagerret_ok);
}
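/*
 * Worked example for the byte count passed to dmu_read_pages() above,
 * assuming a 4096-byte PAGE_SIZE: if the last requested page starts at
 * offset 8192 (so end == 12288) and obj_size is 10000, then
 * MIN(end, obj_size) - (end - PAGE_SIZE) == 10000 - 8192 == 1808, the
 * number of valid bytes in that final page.
 */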
#ifndef _SYS_SYSPROTO_H_
struct vop_getpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int *a_rbehind;
int *a_rahead;
};
#endif
static int
zfs_freebsd_getpages(struct vop_getpages_args *ap)
{
return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
ap->a_rahead));
}
static int
zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
int *rtvals)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
dmu_tx_t *tx;
struct sf_buf *sf;
vm_object_t object;
vm_page_t m;
caddr_t va;
size_t tocopy;
size_t lo_len;
vm_ooffset_t lo_off;
vm_ooffset_t off;
uint_t blksz;
int ncount;
int pcount;
int err;
int i;
object = vp->v_object;
KASSERT(ma[0]->object == object, ("mismatching object"));
KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
pcount = btoc(len);
ncount = pcount;
for (i = 0; i < pcount; i++)
rtvals[i] = zfs_vm_pagerret_error;
if (zfs_enter_verify_zp(zfsvfs, zp, FTAG) != 0)
return (zfs_vm_pagerret_error);
off = IDX_TO_OFF(ma[0]->pindex);
blksz = zp->z_blksz;
lo_off = rounddown(off, blksz);
lo_len = roundup(len + (off - lo_off), blksz);
lr = zfs_rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
zfs_vmobject_wlock(object);
if (len + off > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > off) {
int pgoff;
len = object->un_pager.vnp.vnp_size - off;
ncount = btoc(len);
if ((pgoff = (int)len & PAGE_MASK) != 0) {
/*
* If the object is locked and the following
* conditions hold, then the page's dirty
* field cannot be concurrently changed by a
* pmap operation.
*/
m = ma[ncount - 1];
vm_page_assert_sbusied(m);
KASSERT(!pmap_page_is_write_mapped(m),
("zfs_putpages: page %p is not read-only",
m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);
}
} else {
len = 0;
ncount = 0;
}
if (ncount < pcount) {
for (i = ncount; i < pcount; i++) {
rtvals[i] = zfs_vm_pagerret_bad;
}
}
}
zfs_vmobject_wunlock(object);
if (ncount == 0)
goto out;
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, zp->z_uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, zp->z_gid) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
goto out;
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, off, len);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
goto out;
}
if (zp->z_blksz < PAGE_SIZE) {
for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
va = zfs_map_page(ma[i], &sf);
dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
zfs_unmap_page(sf);
}
} else {
err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
}
if (err == 0) {
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT0(err);
/*
* XXX we should be passing a callback to undirty
* but that would make the locking messier
*/
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off,
len, 0, NULL, NULL);
zfs_vmobject_wlock(object);
for (i = 0; i < ncount; i++) {
rtvals[i] = zfs_vm_pagerret_ok;
vm_page_undirty(ma[i]);
}
zfs_vmobject_wunlock(object);
VM_CNT_INC(v_vnodeout);
VM_CNT_ADD(v_vnodepgsout, ncount);
}
dmu_tx_commit(tx);
out:
zfs_rangelock_exit(lr);
if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zfsvfs->z_log, zp->z_id);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, len);
zfs_exit(zfsvfs, FTAG);
return (rtvals[0]);
}
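/*
 * Worked example of the EOF trimming above (hypothetical values,
 * 4096-byte pages): putting two pages at offset 8192 against a
 * 10000-byte object yields len = 1808, ncount = 1 and pgoff = 1808,
 * so the dirty bits for bytes 1808..4095 of the first page are
 * cleared and the second page is reported as zfs_vm_pagerret_bad.
 */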
#ifndef _SYS_SYSPROTO_H_
struct vop_putpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int a_sync;
int *a_rtvals;
};
#endif
static int
zfs_freebsd_putpages(struct vop_putpages_args *ap)
{
return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
ap->a_rtvals));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_bmap_args {
struct vnode *a_vp;
daddr_t a_bn;
struct bufobj **a_bop;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
};
#endif
static int
zfs_freebsd_bmap(struct vop_bmap_args *ap)
{
if (ap->a_bop != NULL)
*ap->a_bop = &ap->a_vp->v_bufobj;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_open_args {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_open(struct vop_open_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
int error;
error = zfs_open(&vp, ap->a_mode, ap->a_cred);
if (error == 0)
vnode_create_vobject(vp, zp->z_size, ap->a_td);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_close_args {
struct vnode *a_vp;
int a_fflag;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_close(struct vop_close_args *ap)
{
return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_ioctl_args {
struct vnode *a_vp;
ulong_t a_command;
caddr_t a_data;
int a_fflag;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_ioctl(struct vop_ioctl_args *ap)
{
return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
ap->a_fflag, ap->a_cred, NULL));
}
static int
ioflags(int ioflags)
{
int flags = 0;
if (ioflags & IO_APPEND)
flags |= O_APPEND;
if (ioflags & IO_NDELAY)
flags |= O_NONBLOCK;
if (ioflags & IO_SYNC)
flags |= O_SYNC;
return (flags);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_read_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_read(struct vop_read_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_write_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_write(struct vop_write_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#if __FreeBSD_version >= 1300102
/*
* VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
* the comment above cache_fplookup for details.
*/
static int
zfs_freebsd_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
vnode_t *vp;
znode_t *zp;
uint64_t pflags;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL))
return (EAGAIN);
pflags = atomic_load_64(&zp->z_pflags);
if (pflags & ZFS_AV_QUARANTINED)
return (EAGAIN);
if (pflags & ZFS_XATTR)
return (EAGAIN);
if ((pflags & ZFS_NO_EXECS_DENIED) == 0)
return (EAGAIN);
return (0);
}
#endif
#if __FreeBSD_version >= 1300139
static int
zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
vnode_t *vp;
znode_t *zp;
char *target;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL)) {
return (EAGAIN);
}
target = atomic_load_consume_ptr(&zp->z_cached_symlink);
if (target == NULL) {
return (EAGAIN);
}
return (cache_symlink_resolve(v->a_fpl, target, strlen(target)));
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_access_args {
struct vnode *a_vp;
accmode_t a_accmode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_access(struct vop_access_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
accmode_t accmode;
int error = 0;
if (ap->a_accmode == VEXEC) {
if (zfs_fastaccesschk_execute(zp, ap->a_cred) == 0)
return (0);
}
/*
* ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
*/
accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0)
error = zfs_access(zp, accmode, 0, ap->a_cred);
/*
* VADMIN has to be handled by vaccess().
*/
if (error == 0) {
accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0) {
#if __FreeBSD_version >= 1300105
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred);
#else
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred, NULL);
#endif
}
}
/*
* For VEXEC, ensure that at least one execute bit is set for
* non-directories.
*/
if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
(zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
error = EACCES;
}
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_lookup(struct vop_lookup_args *ap, boolean_t cached)
{
struct componentname *cnp = ap->a_cnp;
char nm[NAME_MAX + 1];
ASSERT3U(cnp->cn_namelen, <, sizeof (nm));
strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof (nm)));
return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
cnp->cn_cred, 0, cached));
}
static int
zfs_freebsd_cachedlookup(struct vop_cachedlookup_args *ap)
{
return (zfs_freebsd_lookup((struct vop_lookup_args *)ap, B_TRUE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
static int
zfs_cache_lookup(struct vop_lookup_args *ap)
{
zfsvfs_t *zfsvfs;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
if (zfsvfs->z_use_namecache)
return (vfs_cache_lookup(ap));
else
return (zfs_freebsd_lookup(ap, B_FALSE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_create_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_create(struct vop_create_args *ap)
{
zfsvfs_t *zfsvfs;
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc, mode;
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
vattr_init_mask(vap);
mode = vap->va_mode & ALLPERMS;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
*ap->a_vpp = NULL;
rc = zfs_create(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap, 0, mode,
&zp, cnp->cn_cred, 0 /* flag */, NULL /* vsecattr */, NULL);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
if (zfsvfs->z_use_namecache &&
rc == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_remove_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_remove(struct vop_remove_args *ap)
{
#if __FreeBSD_version < 1400068
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
#endif
return (zfs_remove_(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
ap->a_cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_mkdir_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_mkdir(struct vop_mkdir_args *ap)
{
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc;
#if __FreeBSD_version < 1400068
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
#endif
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_mkdir(VTOZ(ap->a_dvp), ap->a_cnp->cn_nameptr, vap, &zp,
ap->a_cnp->cn_cred, 0, NULL, NULL);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rmdir_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_rmdir(struct vop_rmdir_args *ap)
{
struct componentname *cnp = ap->a_cnp;
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
return (zfs_rmdir_(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readdir_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
int *a_ncookies;
cookie_t **a_cookies;
};
#endif
static int
zfs_freebsd_readdir(struct vop_readdir_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fsync_args {
struct vnode *a_vp;
int a_waitfor;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_getattr(struct vop_getattr_args *ap)
{
vattr_t *vap = ap->a_vap;
xvattr_t xvap;
ulong_t fflags = 0;
int error;
xva_init(&xvap);
xvap.xva_vattr = *vap;
xvap.xva_vattr.va_mask |= AT_XVATTR;
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&xvap, XAT_APPENDONLY);
XVA_SET_REQ(&xvap, XAT_NOUNLINK);
XVA_SET_REQ(&xvap, XAT_NODUMP);
XVA_SET_REQ(&xvap, XAT_READONLY);
XVA_SET_REQ(&xvap, XAT_ARCHIVE);
XVA_SET_REQ(&xvap, XAT_SYSTEM);
XVA_SET_REQ(&xvap, XAT_HIDDEN);
XVA_SET_REQ(&xvap, XAT_REPARSE);
XVA_SET_REQ(&xvap, XAT_OFFLINE);
XVA_SET_REQ(&xvap, XAT_SPARSE);
error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred);
if (error != 0)
return (error);
/* Convert ZFS xattr into chflags. */
#define FLAG_CHECK(fflag, xflag, xfield) do { \
if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
fflags |= (fflag); \
} while (0)
FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHECK(UF_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHECK
*vap = xvap.xva_vattr;
vap->va_flags = fflags;
return (0);
}
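/*
 * For reference, one FLAG_CHECK expansion from above (illustrative):
 *
 *	if (XVA_ISSET_RTN(&xvap, XAT_IMMUTABLE) &&
 *	    xvap.xva_xoptattrs.xoa_immutable != 0)
 *		fflags |= SF_IMMUTABLE;
 *
 * i.e. a chflags(2) bit is reported only when ZFS both returned the
 * attribute and has it set.
 */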
#ifndef _SYS_SYSPROTO_H_
struct vop_setattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_setattr(struct vop_setattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
cred_t *cred = ap->a_cred;
xvattr_t xvap;
ulong_t fflags;
uint64_t zflags;
vattr_init_mask(vap);
vap->va_mask &= ~AT_NOSET;
xva_init(&xvap);
xvap.xva_vattr = *vap;
zflags = VTOZ(vp)->z_pflags;
if (vap->va_flags != VNOVAL) {
zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
int error;
if (zfsvfs->z_use_fuids == B_FALSE)
return (EOPNOTSUPP);
fflags = vap->va_flags;
/*
* XXX KDM
* We need to figure out whether it makes sense to allow
* UF_REPARSE through, since we don't really have other
* facilities to handle reparse points and zfs_setattr()
* doesn't currently allow setting that attribute anyway.
*/
if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
UF_OFFLINE|UF_SPARSE)) != 0)
return (EOPNOTSUPP);
/*
* Unprivileged processes are not permitted to unset system
* flags, or modify flags if any system flags are set.
* Privileged non-jail processes may not modify system flags
* if securelevel > 0 and any existing system flags are set.
* Privileged jail processes behave like privileged non-jail
* processes if the PR_ALLOW_CHFLAGS permission bit is set;
* otherwise, they behave like unprivileged processes.
*/
if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
spl_priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
error = securelevel_gt(cred, 0);
if (error != 0)
return (error);
}
} else {
/*
* Callers may only modify the file flags on
* objects they have VADMIN rights for.
*/
if ((error = VOP_ACCESS(vp, VADMIN, cred,
curthread)) != 0)
return (error);
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY |
ZFS_NOUNLINK)) {
return (EPERM);
}
if (fflags &
(SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
return (EPERM);
}
}
#define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
if (((fflags & (fflag)) && !(zflags & (zflag))) || \
((zflags & (zflag)) && !(fflags & (fflag)))) { \
XVA_SET_REQ(&xvap, (xflag)); \
(xfield) = ((fflags & (fflag)) != 0); \
} \
} while (0)
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHANGE
}
if (vap->va_birthtime.tv_sec != VNOVAL) {
xvap.xva_vattr.va_mask |= AT_XVATTR;
XVA_SET_REQ(&xvap, XAT_CREATETIME);
}
return (zfs_setattr(VTOZ(vp), (vattr_t *)&xvap, 0, cred, NULL));
}
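/*
 * For reference, one FLAG_CHANGE expansion from above (illustrative):
 *
 *	if (((fflags & UF_NODUMP) && !(zflags & ZFS_NODUMP)) ||
 *	    ((zflags & ZFS_NODUMP) && !(fflags & UF_NODUMP))) {
 *		XVA_SET_REQ(&xvap, XAT_NODUMP);
 *		xvap.xva_xoptattrs.xoa_nodump = ((fflags & UF_NODUMP) != 0);
 *	}
 *
 * i.e. an xvattr request is generated only for flags whose requested
 * state differs from the current ZFS pflags.
 */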
#ifndef _SYS_SYSPROTO_H_
struct vop_rename_args {
struct vnode *a_fdvp;
struct vnode *a_fvp;
struct componentname *a_fcnp;
struct vnode *a_tdvp;
struct vnode *a_tvp;
struct componentname *a_tcnp;
};
#endif
static int
zfs_freebsd_rename(struct vop_rename_args *ap)
{
vnode_t *fdvp = ap->a_fdvp;
vnode_t *fvp = ap->a_fvp;
vnode_t *tdvp = ap->a_tdvp;
vnode_t *tvp = ap->a_tvp;
int error;
#if __FreeBSD_version < 1400068
ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
#endif
error = zfs_do_rename(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
ap->a_tcnp, ap->a_fcnp->cn_cred);
vrele(fdvp);
vrele(fvp);
vrele(tdvp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_symlink_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
char *a_target;
};
#endif
static int
zfs_freebsd_symlink(struct vop_symlink_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
#if __FreeBSD_version >= 1300139
char *symlink;
size_t symlink_len;
#endif
int rc;
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_symlink(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap,
ap->a_target, &zp, cnp->cn_cred, 0 /* flags */, NULL);
if (rc == 0) {
*ap->a_vpp = ZTOV(zp);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
#if __FreeBSD_version >= 1300139
MPASS(zp->z_cached_symlink == NULL);
symlink_len = strlen(ap->a_target);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, ap->a_target, symlink_len);
symlink[symlink_len] = '\0';
atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)symlink);
}
#endif
}
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readlink_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_readlink(struct vop_readlink_args *ap)
{
zfs_uio_t uio;
int error;
#if __FreeBSD_version >= 1300139
znode_t *zp = VTOZ(ap->a_vp);
char *symlink, *base;
size_t symlink_len;
bool trycache;
#endif
zfs_uio_init(&uio, ap->a_uio);
#if __FreeBSD_version >= 1300139
trycache = false;
if (zfs_uio_segflg(&uio) == UIO_SYSSPACE &&
zfs_uio_iovcnt(&uio) == 1) {
base = zfs_uio_iovbase(&uio, 0);
symlink_len = zfs_uio_iovlen(&uio, 0);
trycache = true;
}
#endif
error = zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL);
#if __FreeBSD_version >= 1300139
if (atomic_load_ptr(&zp->z_cached_symlink) != NULL ||
error != 0 || !trycache) {
return (error);
}
symlink_len -= zfs_uio_resid(&uio);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, base, symlink_len);
symlink[symlink_len] = '\0';
if (!atomic_cmpset_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)NULL, (uintptr_t)symlink)) {
cache_symlink_free(symlink, symlink_len + 1);
}
}
#endif
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_link_args {
struct vnode *a_tdvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_link(struct vop_link_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *vp = ap->a_vp;
vnode_t *tdvp = ap->a_tdvp;
if (tdvp->v_mount != vp->v_mount)
return (EXDEV);
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
return (zfs_link(VTOZ(tdvp), VTOZ(vp),
cnp->cn_nameptr, cnp->cn_cred, 0));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
#else
zfs_inactive(vp, ap->a_td->td_ucred, NULL);
#endif
return (0);
}
#if __FreeBSD_version >= 1300042
#ifndef _SYS_SYSPROTO_H_
struct vop_need_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_need_inactive(struct vop_need_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int need;
if (vn_need_pageq_flush(vp))
return (1);
if (!ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs))
return (1);
need = (zp->z_sa_hdl == NULL || zp->z_unlinked || zp->z_atime_dirty);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
return (need);
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT3P(zp, !=, NULL);
#if __FreeBSD_version < 1300042
/* Destroy the vm object and flush associated pages. */
vnode_destroy_vobject(vp);
#endif
/*
* z_teardown_inactive_lock protects from a race with
* zfs_znode_dmu_fini in zfsvfs_teardown during
* force unmount.
*/
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL)
zfs_znode_free(zp);
else
zfs_zinactive(zp);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vp->v_data = NULL;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfs_freebsd_fid(struct vop_fid_args *ap)
{
return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_pathconf_args {
struct vnode *a_vp;
int a_name;
register_t *a_retval;
} *ap;
#endif
static int
zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
{
ulong_t val;
int error;
error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
curthread->td_ucred, NULL);
if (error == 0) {
*ap->a_retval = val;
return (error);
}
if (error != EOPNOTSUPP)
return (error);
switch (ap->a_name) {
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
#if __FreeBSD_version >= 1400032
case _PC_DEALLOC_PRESENT:
*ap->a_retval = 1;
return (0);
#endif
case _PC_PIPE_BUF:
if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) {
*ap->a_retval = PIPE_BUF;
return (0);
}
return (EINVAL);
default:
return (vop_stdpathconf(ap));
}
}
static int zfs_xattr_compat = 1;
static int
zfs_check_attrname(const char *name)
{
/* We don't allow '/' character in attribute name. */
if (strchr(name, '/') != NULL)
return (SET_ERROR(EINVAL));
/* We don't allow attribute names that start with a namespace prefix. */
if (ZFS_XA_NS_PREFIX_FORBIDDEN(name))
return (SET_ERROR(EINVAL));
return (0);
}
/*
* FreeBSD's extended attribute namespace defines the file name prefix for the
* ZFS extended attribute name:
*
* NAMESPACE	XATTR_COMPAT	PREFIX
* system	*		freebsd:system:
* user		1		(none, can be used to access ZFS
*				fsattr(5) attributes created on Solaris)
* user		0		user.
*/
static int
zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
size_t size, boolean_t compat)
{
const char *namespace, *prefix, *suffix;
memset(attrname, 0, size);
switch (attrnamespace) {
case EXTATTR_NAMESPACE_USER:
if (compat) {
/*
* This is the default namespace by which we can access
* all attributes created on Solaris.
*/
prefix = namespace = suffix = "";
} else {
/*
* This is compatible with the user namespace encoding
* on Linux prior to xattr_compat, but nothing
* else.
*/
prefix = "";
namespace = "user";
suffix = ".";
}
break;
case EXTATTR_NAMESPACE_SYSTEM:
prefix = "freebsd:";
namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
suffix = ":";
break;
case EXTATTR_NAMESPACE_EMPTY:
default:
return (SET_ERROR(EINVAL));
}
if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
name) >= size) {
return (SET_ERROR(ENAMETOOLONG));
}
return (0);
}
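/*
 * Worked examples of the encoding above for a hypothetical attribute
 * named "foo":
 *
 *	EXTATTR_NAMESPACE_USER,   compat == B_TRUE  -> "foo"
 *	EXTATTR_NAMESPACE_USER,   compat == B_FALSE -> "user.foo"
 *	EXTATTR_NAMESPACE_SYSTEM, either            -> "freebsd:system:foo"
 */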
static int
zfs_ensure_xattr_cached(znode_t *zp)
{
int error = 0;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zp->z_xattr_cached != NULL)
return (0);
if (rw_write_held(&zp->z_xattr_lock))
return (zfs_sa_get_xattr(zp));
if (!rw_tryupgrade(&zp->z_xattr_lock)) {
rw_exit(&zp->z_xattr_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
}
if (zp->z_xattr_cached == NULL)
error = zfs_sa_get_xattr(zp);
rw_downgrade(&zp->z_xattr_lock);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_getextattr_dir(struct vop_getextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
flags = FREAD;
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
#endif
error = vn_open_cred(&nd, &flags, 0, VN_OPEN_INVFS, ap->a_cred, NULL);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
NDFREE_PNBUF(&nd);
if (ap->a_size != NULL) {
error = VOP_GETATTR(vp, &va, ap->a_cred);
if (error == 0)
*ap->a_size = (size_t)va.va_size;
} else if (ap->a_uio != NULL)
error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_getextattr_sa(struct vop_getextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
uchar_t *nv_value;
uint_t nv_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
error = nvlist_lookup_byte_array(zp->z_xattr_cached, attrname,
&nv_value, &nv_size);
if (error != 0)
return (SET_ERROR(error));
if (ap->a_size != NULL)
*ap->a_size = nv_size;
else if (ap->a_uio != NULL)
error = uiomove(nv_value, nv_size, ap->a_uio);
if (error != 0)
return (SET_ERROR(error));
return (0);
}
static int
zfs_getextattr_impl(struct vop_getextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname), compat);
if (error != 0)
return (error);
error = ENOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_getextattr_sa(ap, attrname);
if (error == ENOENT)
error = zfs_getextattr_dir(ap, attrname);
return (error);
}
/*
* Vnode operation to retrieve a named extended attribute.
*/
static int
zfs_getextattr(struct vop_getextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (SET_ERROR(error));
error = zfs_check_attrname(ap->a_name);
if (error != 0)
return (error);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = ENOENT;
rw_enter(&zp->z_xattr_lock, RW_READER);
error = zfs_getextattr_impl(ap, zfs_xattr_compat);
if ((error == ENOENT || error == ENOATTR) &&
ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/*
* Fall back to the alternate namespace format if we failed to
* find a user xattr.
*/
error = zfs_getextattr_impl(ap, !zfs_xattr_compat);
}
rw_exit(&zp->z_xattr_lock);
zfs_exit(zfsvfs, FTAG);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_deleteextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_deleteextattr_dir(struct vop_deleteextattr_args *ap, const char *attrname)
{
struct nameidata nd;
vnode_t *xvp = NULL, *vp;
int error;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp, ap->a_td);
#else
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp);
#endif
error = namei(&nd);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
NDFREE_PNBUF(&nd);
vput(nd.ni_dvp);
if (vp == nd.ni_dvp)
vrele(vp);
else
vput(vp);
return (error);
}
static int
zfs_deleteextattr_sa(struct vop_deleteextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
error = nvlist_remove(nvl, attrname, DATA_TYPE_BYTE_ARRAY);
if (error != 0)
error = SET_ERROR(error);
else
error = zfs_sa_set_xattr(zp, attrname, NULL, 0);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
static int
zfs_deleteextattr_impl(struct vop_deleteextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname), compat);
if (error != 0)
return (error);
error = ENOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_deleteextattr_sa(ap, attrname);
if (error == ENOENT)
error = zfs_deleteextattr_dir(ap, attrname);
return (error);
}
/*
* Vnode operation to remove a named attribute.
*/
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (SET_ERROR(error));
error = zfs_check_attrname(ap->a_name);
if (error != 0)
return (error);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
error = zfs_deleteextattr_impl(ap, zfs_xattr_compat);
if ((error == ENOENT || error == ENOATTR) &&
ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/*
* Fall back to the alternate namespace format if we failed to
* find a user xattr.
*/
error = zfs_deleteextattr_impl(ap, !zfs_xattr_compat);
}
rw_exit(&zp->z_xattr_lock);
zfs_exit(zfsvfs, FTAG);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_setextattr_dir(struct vop_setextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR | CREATE_XATTR_DIR, B_FALSE);
if (error != 0)
return (error);
flags = FFLAGS(O_WRONLY | O_CREAT);
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
#endif
error = vn_open_cred(&nd, &flags, 0600, VN_OPEN_INVFS, ap->a_cred,
NULL);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
NDFREE_PNBUF(&nd);
VATTR_NULL(&va);
va.va_size = 0;
error = VOP_SETATTR(vp, &va, ap->a_cred);
if (error == 0)
VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_setextattr_sa(struct vop_setextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
size_t sa_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
size_t entry_size = ap->a_uio->uio_resid;
if (entry_size > DXATTR_MAX_ENTRY_SIZE)
return (SET_ERROR(EFBIG));
error = nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error != 0)
return (SET_ERROR(error));
if (sa_size > DXATTR_MAX_SA_SIZE)
return (SET_ERROR(EFBIG));
uchar_t *buf = kmem_alloc(entry_size, KM_SLEEP);
error = uiomove(buf, entry_size, ap->a_uio);
if (error != 0) {
error = SET_ERROR(error);
} else {
error = nvlist_add_byte_array(nvl, attrname, buf, entry_size);
if (error != 0)
error = SET_ERROR(error);
}
if (error == 0)
error = zfs_sa_set_xattr(zp, attrname, buf, entry_size);
kmem_free(buf, entry_size);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
static int
zfs_setextattr_impl(struct vop_setextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname), compat);
if (error != 0)
return (error);
struct vop_deleteextattr_args vda = {
.a_vp = ap->a_vp,
.a_attrnamespace = ap->a_attrnamespace,
.a_name = ap->a_name,
.a_cred = ap->a_cred,
.a_td = ap->a_td,
};
error = ENOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa && zfsvfs->z_xattr_sa) {
error = zfs_setextattr_sa(ap, attrname);
if (error == 0) {
/*
* Successfully put into the SA; clear any copy
* in the xattr dir if present.
*/
zfs_deleteextattr_dir(&vda, attrname);
}
}
if (error != 0) {
error = zfs_setextattr_dir(ap, attrname);
if (error == 0 && zp->z_is_sa) {
/*
* Successfully put into the xattr dir; clear any
* copy in the SA if present.
*/
zfs_deleteextattr_sa(&vda, attrname);
}
}
if (error == 0 && ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/*
* Also clear all versions of the alternate compat name.
*/
zfs_deleteextattr_impl(&vda, !compat);
}
return (error);
}
/*
* Vnode operation to set a named attribute.
*/
static int
zfs_setextattr(struct vop_setextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (SET_ERROR(error));
error = zfs_check_attrname(ap->a_name);
if (error != 0)
return (error);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
error = zfs_setextattr_impl(ap, zfs_xattr_compat);
rw_exit(&zp->z_xattr_lock);
zfs_exit(zfsvfs, FTAG);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_listextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_listextattr_dir(struct vop_listextattr_args *ap, const char *attrprefix)
{
struct thread *td = ap->a_td;
struct nameidata nd;
uint8_t dirbuf[sizeof (struct dirent)];
struct iovec aiov;
struct uio auio;
vnode_t *xvp = NULL, *vp;
int error, eof;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0) {
/*
* ENOATTR means that the EA directory does not yet exist,
* i.e. there are no extended attributes there.
*/
if (error == ENOATTR)
error = 0;
return (error);
}
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp);
#endif
error = namei(&nd);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
NDFREE_PNBUF(&nd);
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
auio.uio_rw = UIO_READ;
auio.uio_offset = 0;
size_t plen = strlen(attrprefix);
do {
aiov.iov_base = (void *)dirbuf;
aiov.iov_len = sizeof (dirbuf);
auio.uio_resid = sizeof (dirbuf);
error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
if (error != 0)
break;
int done = sizeof (dirbuf) - auio.uio_resid;
for (int pos = 0; pos < done; ) {
struct dirent *dp = (struct dirent *)(dirbuf + pos);
pos += dp->d_reclen;
/*
* XXX: Temporarily we also accept DT_UNKNOWN, as this
* is what we get when an attribute was created on Solaris.
*/
if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
continue;
else if (plen == 0 &&
ZFS_XA_NS_PREFIX_FORBIDDEN(dp->d_name))
continue;
else if (strncmp(dp->d_name, attrprefix, plen) != 0)
continue;
uint8_t nlen = dp->d_namlen - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* The format of an extattr name entry is one byte
* for the length and the rest for the name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = dp->d_name + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0) {
error = SET_ERROR(error);
break;
}
}
}
} while (!eof && error == 0);
vput(vp);
return (error);
}
static int
zfs_listextattr_sa(struct vop_listextattr_args *ap, const char *attrprefix)
{
znode_t *zp = VTOZ(ap->a_vp);
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
size_t plen = strlen(attrprefix);
nvpair_t *nvp = NULL;
while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
const char *name = nvpair_name(nvp);
if (plen == 0 && ZFS_XA_NS_PREFIX_FORBIDDEN(name))
continue;
else if (strncmp(name, attrprefix, plen) != 0)
continue;
uint8_t nlen = strlen(name) - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* The format of an extattr name entry is one byte
* for the length and the rest for the name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = __DECONST(char *, name) + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0) {
error = SET_ERROR(error);
break;
}
}
}
return (error);
}
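/*
 * Illustrative sketch of the list encoding produced by the two
 * functions above: for attributes "foo" and "ab" the uio receives the
 * bytes { 3, 'f', 'o', 'o', 2, 'a', 'b' }, i.e. a one-byte length
 * followed by the unterminated name, which is the layout
 * extattr_list_file(2) expects.
 */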
static int
zfs_listextattr_impl(struct vop_listextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrprefix[16];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
sizeof (attrprefix), compat);
if (error != 0)
return (error);
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_listextattr_sa(ap, attrprefix);
if (error == 0)
error = zfs_listextattr_dir(ap, attrprefix);
return (error);
}
/*
* Vnode operation to retrieve extended attributes on a vnode.
*/
static int
zfs_listextattr(struct vop_listextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
if (ap->a_size != NULL)
*ap->a_size = 0;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (SET_ERROR(error));
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
rw_enter(&zp->z_xattr_lock, RW_READER);
error = zfs_listextattr_impl(ap, zfs_xattr_compat);
if (error == 0 && ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/* Also list user xattrs with the alternate format. */
error = zfs_listextattr_impl(ap, !zfs_xattr_compat);
}
rw_exit(&zp->z_xattr_lock);
zfs_exit(zfsvfs, FTAG);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_getacl(struct vop_getacl_args *ap)
{
int error;
vsecattr_t vsecattr;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
&vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
vsecattr.vsa_aclcnt);
if (vsecattr.vsa_aclentp != NULL)
kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_setacl(struct vop_setacl_args *ap)
{
int error;
vsecattr_t vsecattr;
int aclbsize; /* size of acl list in bytes */
aclent_t *aaclp;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
if (ap->a_aclp == NULL)
return (EINVAL);
if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
return (EINVAL);
/*
* With NFSv4 ACLs, chmod(2) may need to add additional entries,
* splitting every entry into two and appending "canonical six"
* entries at the end. Don't allow for setting an ACL that would
* cause chmod(2) to run out of ACL entries.
*/
if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
return (ENOSPC);
error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
if (error != 0)
return (error);
vsecattr.vsa_mask = VSA_ACE;
aclbsize = ap->a_aclp->acl_cnt * sizeof (ace_t);
vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
aaclp = vsecattr.vsa_aclentp;
vsecattr.vsa_aclentsz = aclbsize;
aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
error = zfs_setsecattr(VTOZ(ap->a_vp), &vsecattr, 0, ap->a_cred);
kmem_free(aaclp, aclbsize);
return (error);
}
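/*
 * Worked example for the headroom check above, assuming the FreeBSD
 * ACL_MAX_ENTRIES value of 254: acl_cnt may be at most 124, since
 * 124 * 2 + 6 == 254 still fits while 125 * 2 + 6 == 256 would not.
 */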
#ifndef _SYS_SYSPROTO_H_
struct vop_aclcheck_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_aclcheck(struct vop_aclcheck_args *ap)
{
return (EOPNOTSUPP);
}
static int
zfs_vptocnp(struct vop_vptocnp_args *ap)
{
vnode_t *covered_vp;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
znode_t *zp = VTOZ(vp);
int ltype;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
/*
* If we are a snapshot mounted under .zfs, run the operation
* on the covered vnode.
*/
if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
char name[MAXNAMLEN + 1];
znode_t *dzp;
size_t len;
error = zfs_znode_parent_and_name(zp, &dzp, name);
if (error == 0) {
len = strlen(name);
if (*ap->a_buflen < len)
error = SET_ERROR(ENOMEM);
}
if (error == 0) {
*ap->a_buflen -= len;
memcpy(ap->a_buf + *ap->a_buflen, name, len);
*ap->a_vpp = ZTOV(dzp);
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_exit(zfsvfs, FTAG);
covered_vp = vp->v_mount->mnt_vnodecovered;
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(covered_vp);
#else
vhold(covered_vp);
#endif
ltype = VOP_ISLOCKED(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300045
error = vget_finish(covered_vp, LK_SHARED, vs);
#else
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
#if __FreeBSD_version >= 1300123
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
ap->a_buflen);
#else
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
ap->a_buf, ap->a_buflen);
#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
if (VN_IS_DOOMED(vp))
error = SET_ERROR(ENOENT);
return (error);
}
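/*
 * Sketch of the buffer convention used above (hypothetical values):
 * the name is copied in from the end of the caller's buffer, so with
 * a 16-byte a_buf and the name "snap" the function leaves *a_buflen at
 * 12 and bytes 12..15 holding "snap"; successive ancestors are
 * prepended the same way as the caller walks toward the root.
 */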
#if __FreeBSD_version >= 1400032
static int
zfs_deallocate(struct vop_deallocate_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog;
off_t off, len, file_sz;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
zilog = zfsvfs->z_log;
off = *ap->a_offset;
len = *ap->a_len;
file_sz = zp->z_size;
if (off + len > file_sz)
len = file_sz - off;
/* Fast path for out-of-range request. */
if (len <= 0) {
*ap->a_len = 0;
zfs_exit(zfsvfs, FTAG);
return (0);
}
error = zfs_freesp(zp, off, len, O_RDWR, TRUE);
if (error == 0) {
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS ||
(ap->a_ioflag & IO_SYNC) != 0)
zil_commit(zilog, zp->z_id);
*ap->a_offset = off + len;
*ap->a_len = 0;
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
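/*
 * Illustrative userland trigger (hypothetical values): on FreeBSD 14
 * and newer a hole-punch request such as
 *
 *	struct spacectl_range r = { .r_offset = 0, .r_len = 4096 };
 *	fspacectl(fd, SPACECTL_DEALLOC, &r, 0, &r);
 *
 * reaches this VOP through the generic file layer; on success *a_len
 * is written back as 0 to signal that the whole range was handled.
 */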
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_copy_file_range_args {
struct vnode *a_invp;
off_t *a_inoffp;
struct vnode *a_outvp;
off_t *a_outoffp;
size_t *a_lenp;
unsigned int a_flags;
struct ucred *a_incred;
struct ucred *a_outcred;
struct thread *a_fsizetd;
}
#endif
/*
* TODO: FreeBSD will only call the file system-specific copy_file_range() if
* both files reside under the same mountpoint. In the case of ZFS we want to
* be called even if the files are in different datasets (but on the same
* pool, which we need to check ourselves).
*/
static int
zfs_freebsd_copy_file_range(struct vop_copy_file_range_args *ap)
{
zfsvfs_t *outzfsvfs;
struct vnode *invp = ap->a_invp;
struct vnode *outvp = ap->a_outvp;
struct mount *mp;
struct uio io;
int error;
uint64_t len = *ap->a_lenp;
if (!zfs_bclone_enabled) {
mp = NULL;
goto bad_write_fallback;
}
/*
* TODO: If offset/length is not aligned to recordsize, use
* vn_generic_copy_file_range() on this fragment.
* It would be better to do this after we lock the vnodes, but then we
* would need something other than vn_generic_copy_file_range().
*/
vn_start_write(outvp, &mp, V_WAIT);
if (__predict_true(mp == outvp->v_mount)) {
outzfsvfs = (zfsvfs_t *)mp->mnt_data;
if (!spa_feature_is_enabled(dmu_objset_spa(outzfsvfs->z_os),
SPA_FEATURE_BLOCK_CLONING)) {
goto bad_write_fallback;
}
}
if (invp == outvp) {
if (vn_lock(outvp, LK_EXCLUSIVE) != 0) {
goto bad_write_fallback;
}
} else {
-#if __FreeBSD_version >= 1400086
+#if (__FreeBSD_version >= 1302506 && __FreeBSD_version < 1400000) || \
+ __FreeBSD_version >= 1400086
vn_lock_pair(invp, false, LK_EXCLUSIVE, outvp, false,
LK_EXCLUSIVE);
#else
vn_lock_pair(invp, false, outvp, false);
#endif
if (VN_IS_DOOMED(invp) || VN_IS_DOOMED(outvp)) {
goto bad_locked_fallback;
}
}
#ifdef MAC
error = mac_vnode_check_write(curthread->td_ucred, ap->a_outcred,
outvp);
if (error != 0)
goto out_locked;
#endif
io.uio_offset = *ap->a_outoffp;
io.uio_resid = *ap->a_lenp;
error = vn_rlimit_fsize(outvp, &io, ap->a_fsizetd);
if (error != 0)
goto out_locked;
error = zfs_clone_range(VTOZ(invp), ap->a_inoffp, VTOZ(outvp),
ap->a_outoffp, &len, ap->a_outcred);
- if (error == EXDEV)
+ if (error == EXDEV || error == EAGAIN || error == EINVAL ||
+ error == EOPNOTSUPP)
goto bad_locked_fallback;
*ap->a_lenp = (size_t)len;
out_locked:
if (invp != outvp)
VOP_UNLOCK(invp);
VOP_UNLOCK(outvp);
if (mp != NULL)
vn_finished_write(mp);
return (error);
bad_locked_fallback:
if (invp != outvp)
VOP_UNLOCK(invp);
VOP_UNLOCK(outvp);
bad_write_fallback:
if (mp != NULL)
vn_finished_write(mp);
error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags,
ap->a_incred, ap->a_outcred, ap->a_fsizetd);
return (error);
}
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;
struct vop_vector zfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_inactive = zfs_freebsd_inactive,
#if __FreeBSD_version >= 1300042
.vop_need_inactive = zfs_freebsd_need_inactive,
#endif
.vop_reclaim = zfs_freebsd_reclaim,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_allocate = VOP_EINVAL,
#if __FreeBSD_version >= 1400032
.vop_deallocate = zfs_deallocate,
#endif
.vop_lookup = zfs_cache_lookup,
.vop_cachedlookup = zfs_freebsd_cachedlookup,
.vop_getattr = zfs_freebsd_getattr,
.vop_setattr = zfs_freebsd_setattr,
.vop_create = zfs_freebsd_create,
.vop_mknod = (vop_mknod_t *)zfs_freebsd_create,
.vop_mkdir = zfs_freebsd_mkdir,
.vop_readdir = zfs_freebsd_readdir,
.vop_fsync = zfs_freebsd_fsync,
.vop_open = zfs_freebsd_open,
.vop_close = zfs_freebsd_close,
.vop_rmdir = zfs_freebsd_rmdir,
.vop_ioctl = zfs_freebsd_ioctl,
.vop_link = zfs_freebsd_link,
.vop_symlink = zfs_freebsd_symlink,
.vop_readlink = zfs_freebsd_readlink,
.vop_read = zfs_freebsd_read,
.vop_write = zfs_freebsd_write,
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = zfs_freebsd_bmap,
.vop_fid = zfs_freebsd_fid,
.vop_getextattr = zfs_getextattr,
.vop_deleteextattr = zfs_deleteextattr,
.vop_setextattr = zfs_setextattr,
.vop_listextattr = zfs_listextattr,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
.vop_getpages = zfs_freebsd_getpages,
.vop_putpages = zfs_freebsd_putpages,
.vop_vptocnp = zfs_vptocnp,
#if __FreeBSD_version >= 1300064
.vop_lock1 = vop_lock,
.vop_unlock = vop_unlock,
.vop_islocked = vop_islocked,
#endif
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
.vop_copy_file_range = zfs_freebsd_copy_file_range,
};
VFS_VOP_VECTOR_REGISTER(zfs_vnodeops);
struct vop_vector zfs_fifoops = {
.vop_default = &fifo_specops,
.vop_fsync = zfs_freebsd_fsync,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_getattr = zfs_freebsd_getattr,
.vop_inactive = zfs_freebsd_inactive,
.vop_read = VOP_PANIC,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_setattr = zfs_freebsd_setattr,
.vop_write = VOP_PANIC,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_fid = zfs_freebsd_fid,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_fifoops);
/*
* special share hidden files vnode operations template
*/
struct vop_vector zfs_shareops = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_access = zfs_freebsd_access,
.vop_inactive = zfs_freebsd_inactive,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_fid = zfs_freebsd_fid,
.vop_pathconf = zfs_freebsd_pathconf,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_shareops);
ZFS_MODULE_PARAM(zfs, zfs_, xattr_compat, INT, ZMOD_RW,
"Use legacy ZFS xattr naming for writing new user namespace xattrs");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
index dca48e1e4010..c45a3eb5a4eb 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
@@ -1,1302 +1,1318 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* LLNL-CODE-403049.
* Rewritten for Linux by:
* Rohan Puri <rohan.puri15@gmail.com>
* Brian Behlendorf <behlendorf1@llnl.gov>
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright (c) 2018 George Melikov. All Rights Reserved.
* Copyright (c) 2019 Datto, Inc. All rights reserved.
* Copyright (c) 2020 The MathWorks, Inc. All rights reserved.
*/
/*
* ZFS control directory (a.k.a. ".zfs")
*
* This directory provides a common location for all ZFS meta-objects.
* Currently, these are only the 'snapshot' and 'shares' directories, but this may
* expand in the future. The elements are built dynamically, as the hierarchy
* does not actually exist on disk.
*
* For 'snapshot', we don't want to have all snapshots always mounted, because
* this would take up a huge amount of space in /etc/mnttab. We have three
* types of objects:
*
* ctldir ------> snapshotdir -------> snapshot
* |
* |
* V
* mounted fs
*
* The 'snapshot' node contains just enough information to look up '..' and act
* as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
* perform an automount of the underlying filesystem and return the
* corresponding inode.
*
* All mounts are handled automatically by a user mode helper which invokes
* the mount procedure. Unmounts are handled by allowing the mount
* point to expire so the kernel may automatically unmount it.
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') all share the same
* zfsvfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the '.zfs/snapshot/<snapname>' paths
* (ie: snapshots) are complete ZFS filesystems and have their own unique
* zfsvfs_t. However, the fsid reported by these mounts will be the same
* as that used by the parent zfsvfs_t to make NFS happy.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/zpl.h>
#include <sys/mntent.h>
#include "zfs_namecheck.h"
/*
* Two AVL trees are maintained which contain all currently automounted
* snapshots. Every automounted snapshots maps to a single zfs_snapentry_t
* entry which MUST:
*
* - be attached to both trees, and
* - be unique, no duplicate entries are allowed.
*
* The zfs_snapshots_by_name tree is indexed by the full dataset name
* while the zfs_snapshots_by_objsetid tree is indexed by the unique
* objsetid. This allows for fast lookups either by name or objsetid.
*/
static avl_tree_t zfs_snapshots_by_name;
static avl_tree_t zfs_snapshots_by_objsetid;
static krwlock_t zfs_snapshot_lock;
/*
* Control Directory Tunables (.zfs)
*/
int zfs_expire_snapshot = ZFSCTL_EXPIRE_SNAPSHOT;
static int zfs_admin_snapshot = 0;
typedef struct {
char *se_name; /* full snapshot name */
char *se_path; /* full mount path */
spa_t *se_spa; /* pool spa */
uint64_t se_objsetid; /* snapshot objset id */
struct dentry *se_root_dentry; /* snapshot root dentry */
krwlock_t se_taskqid_lock; /* scheduled unmount taskqid lock */
taskqid_t se_taskqid; /* scheduled unmount taskqid */
avl_node_t se_node_name; /* zfs_snapshots_by_name link */
avl_node_t se_node_objsetid; /* zfs_snapshots_by_objsetid link */
zfs_refcount_t se_refcount; /* reference count */
} zfs_snapentry_t;
static void zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay);
/*
* Allocate a new zfs_snapentry_t, being careful to make a copy of
* the snapshot name and provided mount point. No reference is taken.
*/
static zfs_snapentry_t *
zfsctl_snapshot_alloc(const char *full_name, const char *full_path, spa_t *spa,
uint64_t objsetid, struct dentry *root_dentry)
{
zfs_snapentry_t *se;
se = kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP);
se->se_name = kmem_strdup(full_name);
se->se_path = kmem_strdup(full_path);
se->se_spa = spa;
se->se_objsetid = objsetid;
se->se_root_dentry = root_dentry;
se->se_taskqid = TASKQID_INVALID;
rw_init(&se->se_taskqid_lock, NULL, RW_DEFAULT, NULL);
zfs_refcount_create(&se->se_refcount);
return (se);
}
/*
* Free a zfs_snapentry_t. The caller must ensure there are no active
* references.
*/
static void
zfsctl_snapshot_free(zfs_snapentry_t *se)
{
zfs_refcount_destroy(&se->se_refcount);
kmem_strfree(se->se_name);
kmem_strfree(se->se_path);
rw_destroy(&se->se_taskqid_lock);
kmem_free(se, sizeof (zfs_snapentry_t));
}
/*
* Hold a reference on the zfs_snapentry_t.
*/
static void
zfsctl_snapshot_hold(zfs_snapentry_t *se)
{
zfs_refcount_add(&se->se_refcount, NULL);
}
/*
* Release a reference on the zfs_snapentry_t. When the number of
* references drops to zero the structure will be freed.
*/
static void
zfsctl_snapshot_rele(zfs_snapentry_t *se)
{
if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
zfsctl_snapshot_free(se);
}
/*
* Add a zfs_snapentry_t to both the zfs_snapshots_by_name and
* zfs_snapshots_by_objsetid trees. While the zfs_snapentry_t is part
* of the trees a reference is held.
*/
static void
zfsctl_snapshot_add(zfs_snapentry_t *se)
{
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
zfsctl_snapshot_hold(se);
avl_add(&zfs_snapshots_by_name, se);
avl_add(&zfs_snapshots_by_objsetid, se);
}
/*
* Remove a zfs_snapentry_t from both the zfs_snapshots_by_name and
* zfs_snapshots_by_objsetid trees. Upon removal a reference is dropped,
* this can result in the structure being freed if that was the last
* remaining reference.
*/
static void
zfsctl_snapshot_remove(zfs_snapentry_t *se)
{
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
avl_remove(&zfs_snapshots_by_name, se);
avl_remove(&zfs_snapshots_by_objsetid, se);
zfsctl_snapshot_rele(se);
}
/*
* Snapshot name comparison function for the zfs_snapshots_by_name tree.
*/
static int
snapentry_compare_by_name(const void *a, const void *b)
{
const zfs_snapentry_t *se_a = a;
const zfs_snapentry_t *se_b = b;
int ret;
ret = strcmp(se_a->se_name, se_b->se_name);
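/* Clamp strcmp()'s result to -1/0/1, as the AVL comparator contract requires. */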
if (ret < 0)
return (-1);
else if (ret > 0)
return (1);
else
return (0);
}
/*
* Comparison function for the zfs_snapshots_by_objsetid tree (by spa, then objset id).
*/
static int
snapentry_compare_by_objsetid(const void *a, const void *b)
{
const zfs_snapentry_t *se_a = a;
const zfs_snapentry_t *se_b = b;
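/*
 * Order first by spa pointer (cast to an integer only to obtain a
 * stable, arbitrary total order), then by objset id within a pool.
 */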
if (se_a->se_spa != se_b->se_spa)
return ((ulong_t)se_a->se_spa < (ulong_t)se_b->se_spa ? -1 : 1);
if (se_a->se_objsetid < se_b->se_objsetid)
return (-1);
else if (se_a->se_objsetid > se_b->se_objsetid)
return (1);
else
return (0);
}
/*
* Find a zfs_snapentry_t in zfs_snapshots_by_name. If the snapname
* is found a pointer to the zfs_snapentry_t is returned and a reference
* taken on the structure. The caller is responsible for dropping the
* reference with zfsctl_snapshot_rele(). If the snapname is not found
* NULL will be returned.
*/
static zfs_snapentry_t *
zfsctl_snapshot_find_by_name(const char *snapname)
{
zfs_snapentry_t *se, search;
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
search.se_name = (char *)snapname;
se = avl_find(&zfs_snapshots_by_name, &search, NULL);
if (se)
zfsctl_snapshot_hold(se);
return (se);
}
/*
* Find a zfs_snapentry_t in zfs_snapshots_by_objsetid given the objset id
* rather than the snapname. In all other respects it behaves the same
* as zfsctl_snapshot_find_by_name().
*/
static zfs_snapentry_t *
zfsctl_snapshot_find_by_objsetid(spa_t *spa, uint64_t objsetid)
{
zfs_snapentry_t *se, search;
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
search.se_spa = spa;
search.se_objsetid = objsetid;
se = avl_find(&zfs_snapshots_by_objsetid, &search, NULL);
if (se)
zfsctl_snapshot_hold(se);
return (se);
}
/*
* Rename a zfs_snapentry_t in the zfs_snapshots_by_name. The structure is
* removed, renamed, and added back to the new correct location in the tree.
*/
static int
zfsctl_snapshot_rename(const char *old_snapname, const char *new_snapname)
{
zfs_snapentry_t *se;
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
se = zfsctl_snapshot_find_by_name(old_snapname);
if (se == NULL)
return (SET_ERROR(ENOENT));
zfsctl_snapshot_remove(se);
kmem_strfree(se->se_name);
se->se_name = kmem_strdup(new_snapname);
zfsctl_snapshot_add(se);
zfsctl_snapshot_rele(se);
return (0);
}
/*
* Delayed task responsible for unmounting an expired automounted snapshot.
*/
static void
snapentry_expire(void *data)
{
zfs_snapentry_t *se = (zfs_snapentry_t *)data;
spa_t *spa = se->se_spa;
uint64_t objsetid = se->se_objsetid;
if (zfs_expire_snapshot <= 0) {
zfsctl_snapshot_rele(se);
return;
}
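/*
 * The expiration task is now running; clear its id under the write
 * lock so a concurrent cancel will not act on a stale taskqid.
 */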
rw_enter(&se->se_taskqid_lock, RW_WRITER);
se->se_taskqid = TASKQID_INVALID;
rw_exit(&se->se_taskqid_lock);
(void) zfsctl_snapshot_unmount(se->se_name, MNT_EXPIRE);
zfsctl_snapshot_rele(se);
/*
* Reschedule the unmount if the zfs_snapentry_t wasn't removed.
* This can occur when the snapshot is busy.
*/
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
zfsctl_snapshot_rele(se);
}
rw_exit(&zfs_snapshot_lock);
}
/*
* Cancel an automatic unmount of a snapname. This callback is responsible
* for dropping the reference on the zfs_snapentry_t which was taken
* during dispatch.
*/
static void
zfsctl_snapshot_unmount_cancel(zfs_snapentry_t *se)
{
int err = 0;
rw_enter(&se->se_taskqid_lock, RW_WRITER);
err = taskq_cancel_id(system_delay_taskq, se->se_taskqid);
/*
* If we get ENOENT, the task couldn't be found to be
* canceled, so we can just mark it as invalid because
* it's already gone. If we got EBUSY, then we already
* blocked until it was gone _anyway_, so we don't care.
*/
se->se_taskqid = TASKQID_INVALID;
rw_exit(&se->se_taskqid_lock);
if (err == 0) {
zfsctl_snapshot_rele(se);
}
}
/*
* Dispatch the unmount task for delayed handling with a hold protecting it.
*/
static void
zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay)
{
if (delay <= 0)
return;
zfsctl_snapshot_hold(se);
rw_enter(&se->se_taskqid_lock, RW_WRITER);
/*
* If this condition happens, we managed to:
* - dispatch once
* - want to dispatch _again_ before it returned
*
* So let's just return - if that task fails at unmounting,
* we'll eventually dispatch again, and if it succeeds,
* no problem.
*/
if (se->se_taskqid != TASKQID_INVALID) {
rw_exit(&se->se_taskqid_lock);
zfsctl_snapshot_rele(se);
return;
}
se->se_taskqid = taskq_dispatch_delay(system_delay_taskq,
snapentry_expire, se, TQ_SLEEP, ddi_get_lbolt() + delay * HZ);
rw_exit(&se->se_taskqid_lock);
}
/*
* Schedule an automatic unmount of the objset id to occur in delay seconds
* from now. Any previous delayed unmount will be cancelled in favor of the
* updated deadline. A reference is taken by zfsctl_snapshot_find_by_objsetid()
* and held until the outstanding task is handled or cancelled.
*/
int
zfsctl_snapshot_unmount_delay(spa_t *spa, uint64_t objsetid, int delay)
{
zfs_snapentry_t *se;
int error = ENOENT;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
zfsctl_snapshot_unmount_cancel(se);
zfsctl_snapshot_unmount_delay_impl(se, delay);
zfsctl_snapshot_rele(se);
error = 0;
}
rw_exit(&zfs_snapshot_lock);
return (error);
}
/*
* Check if snapname is currently mounted. Returns B_TRUE when mounted
* and B_FALSE when unmounted.
*/
static boolean_t
zfsctl_snapshot_ismounted(const char *snapname)
{
zfs_snapentry_t *se;
boolean_t ismounted = B_FALSE;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_name(snapname)) != NULL) {
zfsctl_snapshot_rele(se);
ismounted = B_TRUE;
}
rw_exit(&zfs_snapshot_lock);
return (ismounted);
}
/*
* Check if the given inode is a part of the virtual .zfs directory.
*/
boolean_t
zfsctl_is_node(struct inode *ip)
{
return (ITOZ(ip)->z_is_ctldir);
}
/*
* Check if the given inode is a .zfs/snapshot/<snapname> directory.
*/
boolean_t
zfsctl_is_snapdir(struct inode *ip)
{
return (zfsctl_is_node(ip) && (ip->i_ino <= ZFSCTL_INO_SNAPDIRS));
}
/*
* Allocate a new inode with the passed id and ops.
*/
static struct inode *
zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
- const struct file_operations *fops, const struct inode_operations *ops)
+ const struct file_operations *fops, const struct inode_operations *ops,
+ uint64_t creation)
{
- inode_timespec_t now;
struct inode *ip;
znode_t *zp;
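+ /* A nonzero creation time (used for snapshot dirs) seeds the timestamps. */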
+ inode_timespec_t now = {.tv_sec = creation};
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
- now = current_time(ip);
+ if (!creation)
+ now = current_time(ip);
zp = ITOZ(ip);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_id = id;
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_zn_prefetch = B_FALSE;
zp->z_is_sa = B_FALSE;
#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
zp->z_is_mapped = B_FALSE;
#endif
zp->z_is_ctldir = B_TRUE;
zp->z_sa_hdl = NULL;
zp->z_blksz = 0;
zp->z_seq = 0;
zp->z_mapcnt = 0;
zp->z_size = 0;
zp->z_pflags = 0;
zp->z_mode = 0;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
ip->i_generation = 0;
ip->i_ino = id;
ip->i_mode = (S_IFDIR | S_IRWXUGO);
ip->i_uid = SUID_TO_KUID(0);
ip->i_gid = SGID_TO_KGID(0);
ip->i_blkbits = SPA_MINBLOCKSHIFT;
ip->i_atime = now;
ip->i_mtime = now;
ip->i_ctime = now;
ip->i_fop = fops;
ip->i_op = ops;
#if defined(IOP_XATTR)
ip->i_opflags &= ~IOP_XATTR;
#endif
if (insert_inode_locked(ip)) {
unlock_new_inode(ip);
iput(ip);
return (NULL);
}
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
membar_producer();
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
return (ip);
}
/*
* Look up the inode with the given id, allocating it if needed.
*/
static struct inode *
zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops)
{
struct inode *ip = NULL;
+ uint64_t creation = 0;
+ dsl_dataset_t *snap_ds;
+ dsl_pool_t *pool;
while (ip == NULL) {
ip = ilookup(zfsvfs->z_sb, (unsigned long)id);
if (ip)
break;
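+ /*
+  * Snapshot directory ids are at or below ZFSCTL_INO_SNAPDIRS; look up
+  * the snapshot's creation time so the new inode's timestamps reflect
+  * the snapshot rather than the current time.
+  */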
+ if (id <= ZFSCTL_INO_SNAPDIRS && !creation) {
+ pool = dmu_objset_pool(zfsvfs->z_os);
+ dsl_pool_config_enter(pool, FTAG);
+ if (!dsl_dataset_hold_obj(pool,
+ ZFSCTL_INO_SNAPDIRS - id, FTAG, &snap_ds)) {
+ creation = dsl_get_creation(snap_ds);
+ dsl_dataset_rele(snap_ds, FTAG);
+ }
+ dsl_pool_config_exit(pool, FTAG);
+ }
+
/* May fail due to concurrent zfsctl_inode_alloc() */
- ip = zfsctl_inode_alloc(zfsvfs, id, fops, ops);
+ ip = zfsctl_inode_alloc(zfsvfs, id, fops, ops, creation);
}
return (ip);
}
/*
* Create the '.zfs' directory. This directory is cached as part of the VFS
* structure. This results in a hold on the zfsvfs_t. The code in zfs_umount()
* therefore checks against a vfs_count of 2 instead of 1. This reference
* is removed when the ctldir is destroyed in the unmount. All other entities
* under the '.zfs' directory are created dynamically as needed.
*
* Because the dynamically created '.zfs' directory entries assume the use
* of 64-bit inode numbers, this support must be disabled on 32-bit systems.
*/
int
zfsctl_create(zfsvfs_t *zfsvfs)
{
ASSERT(zfsvfs->z_ctldir == NULL);
zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
- &zpl_fops_root, &zpl_ops_root);
+ &zpl_fops_root, &zpl_ops_root, 0);
if (zfsvfs->z_ctldir == NULL)
return (SET_ERROR(ENOENT));
return (0);
}
/*
* Destroy the '.zfs' directory or remove a snapshot from zfs_snapshots_by_name.
* Only called when the filesystem is unmounted.
*/
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
if (zfsvfs->z_issnap) {
zfs_snapentry_t *se;
spa_t *spa = zfsvfs->z_os->os_spa;
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_find_by_objsetid(spa, objsetid);
if (se != NULL)
zfsctl_snapshot_remove(se);
rw_exit(&zfs_snapshot_lock);
if (se != NULL) {
zfsctl_snapshot_unmount_cancel(se);
zfsctl_snapshot_rele(se);
}
} else if (zfsvfs->z_ctldir) {
iput(zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
}
/*
* Given a root znode, retrieve the associated .zfs directory.
* Add a hold to the vnode and return it.
*/
struct inode *
zfsctl_root(znode_t *zp)
{
ASSERT(zfs_has_ctldir(zp));
/* Must have an existing ref, so igrab() cannot return NULL */
VERIFY3P(igrab(ZTOZSB(zp)->z_ctldir), !=, NULL);
return (ZTOZSB(zp)->z_ctldir);
}
/*
* Generate a long fid to indicate a snapdir. We encode whether the snapdir
* is already mounted in the gen field, because an nfsd lookup will not
* trigger the automount. The next time nfsd does fh_to_dentry, we notice
* this, perform the automount, and return ESTALE to force nfsd to
* revalidate and follow the mount.
*/
static int
zfsctl_snapdir_fid(struct inode *ip, fid_t *fidp)
{
zfid_short_t *zfid = (zfid_short_t *)fidp;
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint32_t gen = 0;
uint64_t object;
uint64_t objsetid;
int i;
struct dentry *dentry;
if (fidp->fid_len < LONG_FID_LEN) {
fidp->fid_len = LONG_FID_LEN;
return (SET_ERROR(ENOSPC));
}
object = ip->i_ino;
objsetid = ZFSCTL_INO_SNAPDIRS - ip->i_ino;
zfid->zf_len = LONG_FID_LEN;
dentry = d_obtain_alias(igrab(ip));
if (!IS_ERR(dentry)) {
gen = !!d_mountpoint(dentry);
dput(dentry);
}
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
return (0);
}
/*
* Generate an appropriate fid for an entry in the .zfs directory.
*/
int
zfsctl_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int i;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (zfsctl_is_snapdir(ip)) {
zfs_exit(zfsvfs, FTAG);
return (zfsctl_snapdir_fid(ip, fidp));
}
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOSPC));
}
zfid = (zfid_short_t *)fidp;
zfid->zf_len = SHORT_FID_LEN;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* .zfs znodes always have a generation number of 0 */
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Construct a full dataset name in full_name: "pool/dataset@snap_name"
*/
static int
zfsctl_snapshot_name(zfsvfs_t *zfsvfs, const char *snap_name, int len,
char *full_name)
{
objset_t *os = zfsvfs->z_os;
if (zfs_component_namecheck(snap_name, NULL, NULL) != 0)
return (SET_ERROR(EILSEQ));
dmu_objset_name(os, full_name);
if ((strlen(full_name) + 1 + strlen(snap_name)) >= len)
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(full_name, "@");
(void) strcat(full_name, snap_name);
return (0);
}
/*
* Returns full path in full_path: "/pool/dataset/.zfs/snapshot/snap_name/"
*/
static int
zfsctl_snapshot_path_objset(zfsvfs_t *zfsvfs, uint64_t objsetid,
int path_len, char *full_path)
{
objset_t *os = zfsvfs->z_os;
fstrans_cookie_t cookie;
char *snapname;
boolean_t case_conflict;
uint64_t id, pos = 0;
int error = 0;
if (zfsvfs->z_vfs->vfs_mntpoint == NULL)
return (SET_ERROR(ENOENT));
cookie = spl_fstrans_mark();
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
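/* Walk the snapshot list until the entry matching objsetid is found. */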
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os,
ZFS_MAX_DATASET_NAME_LEN, snapname, &id, &pos,
&case_conflict);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto out;
if (id == objsetid)
break;
}
snprintf(full_path, path_len, "%s/.zfs/snapshot/%s",
zfsvfs->z_vfs->vfs_mntpoint, snapname);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
spl_fstrans_unmark(cookie);
return (error);
}
/*
* Special case the handling of "..".
*/
int
zfsctl_root_lookup(struct inode *dip, const char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
int error = 0;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (strcmp(name, "..") == 0) {
*ipp = dip->i_sb->s_root->d_inode;
} else if (strcmp(name, ZFS_SNAPDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIR,
&zpl_fops_snapdir, &zpl_ops_snapdir);
} else if (strcmp(name, ZFS_SHAREDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SHARES,
&zpl_fops_shares, &zpl_ops_shares);
} else {
*ipp = NULL;
}
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Lookup entry point for the 'snapshot' directory. Try to open the
* snapshot if it exists, creating the pseudo filesystem inode as necessary.
*/
int
zfsctl_snapdir_lookup(struct inode *dip, const char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
uint64_t id;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = dmu_snapshot_lookup(zfsvfs->z_os, name, &id);
if (error) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIRS - id,
&simple_dir_operations, &simple_dir_inode_operations);
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Renaming a directory under '.zfs/snapshot' will automatically trigger
* a rename of the snapshot to the given new name. The rename is confined
* to the '.zfs/snapshot' directory; snapshots cannot be moved elsewhere.
*/
int
zfsctl_snapdir_rename(struct inode *sdip, const char *snm,
struct inode *tdip, const char *tnm, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(sdip);
char *to, *from, *real, *fsname;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
to = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
from = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
snm = real;
} else if (error != ENOTSUP) {
goto out;
}
}
dmu_objset_name(zfsvfs->z_os, fsname);
error = zfsctl_snapshot_name(ITOZSB(sdip), snm,
ZFS_MAX_DATASET_NAME_LEN, from);
if (error == 0)
error = zfsctl_snapshot_name(ITOZSB(tdip), tnm,
ZFS_MAX_DATASET_NAME_LEN, to);
if (error == 0)
error = zfs_secpolicy_rename_perms(from, to, cr);
if (error != 0)
goto out;
/*
* Cannot move snapshots out of the snapdir.
*/
if (sdip != tdip) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* No-op when names are identical.
*/
if (strcmp(snm, tnm) == 0) {
error = 0;
goto out;
}
rw_enter(&zfs_snapshot_lock, RW_WRITER);
error = dsl_dataset_rename_snapshot(fsname, snm, tnm, B_FALSE);
if (error == 0)
(void) zfsctl_snapshot_rename(snm, tnm);
rw_exit(&zfs_snapshot_lock);
out:
kmem_free(from, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(to, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Removing a directory under '.zfs/snapshot' will automatically trigger
* the removal of the snapshot with the given name.
*/
int
zfsctl_snapdir_remove(struct inode *dip, const char *name, cred_t *cr,
int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *snapname, *real;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, name, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
name = real;
} else if (error != ENOTSUP) {
goto out;
}
}
error = zfsctl_snapshot_name(ITOZSB(dip), name,
ZFS_MAX_DATASET_NAME_LEN, snapname);
if (error == 0)
error = zfs_secpolicy_destroy_perms(snapname, cr);
if (error != 0)
goto out;
error = zfsctl_snapshot_unmount(snapname, MNT_FORCE);
if ((error == 0) || (error == ENOENT))
error = dsl_destroy_snapshot(snapname, B_FALSE);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Creating a directory under '.zfs/snapshot' will automatically trigger
* the creation of a new snapshot with the given name.
*/
int
zfsctl_snapdir_mkdir(struct inode *dip, const char *dirname, vattr_t *vap,
struct inode **ipp, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *dsname;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfs_component_namecheck(dirname, NULL, NULL) != 0) {
error = SET_ERROR(EILSEQ);
goto out;
}
dmu_objset_name(zfsvfs->z_os, dsname);
error = zfs_secpolicy_snapshot_perms(dsname, cr);
if (error != 0)
goto out;
error = dmu_objset_snapshot_one(dsname, dirname);
if (error != 0)
goto out;
error = zfsctl_snapdir_lookup(dip, dirname, ipp,
0, cr, NULL, NULL);
out:
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
return (error);
}
/*
* Flush everything out of the kernel's export table and such.
* This is needed as once the snapshot is used over NFS, its
* entries in svc_export and svc_expkey caches hold a reference
* to the snapshot mount point. There is no known way of flushing
* only the entries related to the snapshot.
*/
static void
exportfs_flush(void)
{
char *argv[] = { "/usr/sbin/exportfs", "-f", NULL };
char *envp[] = { NULL };
(void) call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
/*
* Attempt to unmount a snapshot by making a call to user space.
* There is no assurance that this can or will succeed; it is just a
* best effort. In the case where it does fail, perhaps because
* it's in use, the unmount will fail harmlessly.
*/
int
zfsctl_snapshot_unmount(const char *snapname, int flags)
{
char *argv[] = { "/usr/bin/env", "umount", "-t", "zfs", "-n", NULL,
NULL };
char *envp[] = { NULL };
zfs_snapentry_t *se;
int error;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_name(snapname)) == NULL) {
rw_exit(&zfs_snapshot_lock);
return (SET_ERROR(ENOENT));
}
rw_exit(&zfs_snapshot_lock);
exportfs_flush();
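/* Replace -n with -fn in the umount argument list when MNT_FORCE is set. */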
if (flags & MNT_FORCE)
argv[4] = "-fn";
argv[5] = se->se_path;
dprintf("unmount; path=%s\n", se->se_path);
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
zfsctl_snapshot_rele(se);
/*
* The umount system utility will return 256 on error. We must
* assume this error is because the file system is busy, so it is
* converted to the more sensible EBUSY.
*/
if (error)
error = SET_ERROR(EBUSY);
return (error);
}
int
zfsctl_snapshot_mount(struct path *path, int flags)
{
struct dentry *dentry = path->dentry;
struct inode *ip = dentry->d_inode;
zfsvfs_t *zfsvfs;
zfsvfs_t *snap_zfsvfs;
zfs_snapentry_t *se;
char *full_name, *full_path;
char *argv[] = { "/usr/bin/env", "mount", "-t", "zfs", "-n", NULL, NULL,
NULL };
char *envp[] = { NULL };
int error;
struct path spath;
if (ip == NULL)
return (SET_ERROR(EISDIR));
zfsvfs = ITOZSB(ip);
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
full_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
full_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_name(zfsvfs, dname(dentry),
ZFS_MAX_DATASET_NAME_LEN, full_name);
if (error)
goto error;
/*
* Construct a mount point path from sb of the ctldir inode and dirent
* name, instead of from d_path(), so that a chroot'd process doesn't fail
* on mount.zfs(8).
*/
snprintf(full_path, MAXPATHLEN, "%s/.zfs/snapshot/%s",
zfsvfs->z_vfs->vfs_mntpoint ? zfsvfs->z_vfs->vfs_mntpoint : "",
dname(dentry));
/*
* Multiple concurrent automounts of a snapshot are never allowed.
* The snapshot may be manually mounted as many times as desired.
*/
if (zfsctl_snapshot_ismounted(full_name)) {
error = 0;
goto error;
}
/*
* Attempt to mount the snapshot from user space. Normally this
* would be done using the vfs_kern_mount() function, however that
* function is marked GPL-only and cannot be used. On error we are
* careful to log the real error to the console and return EISDIR
* to safely abort the automount. This should be very rare.
*
* If the user mode helper happens to return EBUSY, a concurrent
* mount is already in progress in which case the error is ignored.
* Take note that if the program was executed successfully the return
* value from call_usermodehelper() will be ((exitcode << 8) + signal).
*/
dprintf("mount; name=%s path=%s\n", full_name, full_path);
argv[5] = full_name;
argv[6] = full_path;
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
if (error) {
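/*
 * call_usermodehelper() packs the helper's exit code into bits 8-15,
 * so MOUNT_BUSY must be shifted into place before comparing.
 */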
if (!(error & MOUNT_BUSY << 8)) {
zfs_dbgmsg("Unable to automount %s error=%d",
full_path, error);
error = SET_ERROR(EISDIR);
} else {
/*
* EBUSY, this could mean a concurrent mount, or the
* snapshot has already been mounted at a completely
* different place. We return 0 so the VFS will retry.
* In the latter case the VFS will retry several times
* and eventually return ELOOP, which is probably not
* very good behavior.
*/
error = 0;
}
goto error;
}
/*
* Follow down in to the mounted snapshot and set MNT_SHRINKABLE
* to identify this as an automounted filesystem.
*/
spath = *path;
path_get(&spath);
if (follow_down_one(&spath)) {
snap_zfsvfs = ITOZSB(spath.dentry->d_inode);
snap_zfsvfs->z_parent = zfsvfs;
dentry = spath.dentry;
spath.mnt->mnt_flags |= MNT_SHRINKABLE;
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_alloc(full_name, full_path,
snap_zfsvfs->z_os->os_spa, dmu_objset_id(snap_zfsvfs->z_os),
dentry);
zfsctl_snapshot_add(se);
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
rw_exit(&zfs_snapshot_lock);
}
path_put(&spath);
error:
kmem_free(full_name, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(full_path, MAXPATHLEN);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Get the snapdir inode from fid
*/
int
zfsctl_snapdir_vget(struct super_block *sb, uint64_t objsetid, int gen,
struct inode **ipp)
{
int error;
struct path path;
char *mnt;
struct dentry *dentry;
mnt = kmem_alloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_path_objset(sb->s_fs_info, objsetid,
MAXPATHLEN, mnt);
if (error)
goto out;
/* Trigger automount */
error = -kern_path(mnt, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &path);
if (error)
goto out;
path_put(&path);
/*
* Get the snapdir inode. Note, we don't want to use the above
* path because it contains the root of the snapshot rather
* than the snapdir.
*/
*ipp = ilookup(sb, ZFSCTL_INO_SNAPDIRS - objsetid);
if (*ipp == NULL) {
error = SET_ERROR(ENOENT);
goto out;
}
/* check gen, see zfsctl_snapdir_fid */
dentry = d_obtain_alias(igrab(*ipp));
if (gen != (!IS_ERR(dentry) && d_mountpoint(dentry))) {
iput(*ipp);
*ipp = NULL;
error = SET_ERROR(ENOENT);
}
if (!IS_ERR(dentry))
dput(dentry);
out:
kmem_free(mnt, MAXPATHLEN);
return (error);
}
int
zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
znode_t *zp;
znode_t *dzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (zfsvfs->z_shares_dir == 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
error = zfs_lookup(dzp, name, &zp, 0, cr, NULL, NULL);
zrele(dzp);
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Initialize the various pieces we'll need to create and manipulate .zfs
* directories. Currently this is unused but available.
*/
void
zfsctl_init(void)
{
avl_create(&zfs_snapshots_by_name, snapentry_compare_by_name,
sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t,
se_node_name));
avl_create(&zfs_snapshots_by_objsetid, snapentry_compare_by_objsetid,
sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t,
se_node_objsetid));
rw_init(&zfs_snapshot_lock, NULL, RW_DEFAULT, NULL);
}
/*
* Clean up the various pieces we needed for .zfs directories. In particular
* ensure the expiry timer is canceled safely.
*/
void
zfsctl_fini(void)
{
avl_destroy(&zfs_snapshots_by_name);
avl_destroy(&zfs_snapshots_by_objsetid);
rw_destroy(&zfs_snapshot_lock);
}
module_param(zfs_admin_snapshot, int, 0644);
MODULE_PARM_DESC(zfs_admin_snapshot, "Enable mkdir/rmdir/mv in .zfs/snapshot");
module_param(zfs_expire_snapshot, int, 0644);
MODULE_PARM_DESC(zfs_expire_snapshot, "Seconds to expire .zfs/snapshot");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 6b6293b9e482..464c12e1108d 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -1,2123 +1,2130 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/objlist.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
enum {
TOKEN_RO,
TOKEN_RW,
TOKEN_SETUID,
TOKEN_NOSETUID,
TOKEN_EXEC,
TOKEN_NOEXEC,
TOKEN_DEVICES,
TOKEN_NODEVICES,
TOKEN_DIRXATTR,
TOKEN_SAXATTR,
TOKEN_XATTR,
TOKEN_NOXATTR,
TOKEN_ATIME,
TOKEN_NOATIME,
TOKEN_RELATIME,
TOKEN_NORELATIME,
TOKEN_NBMAND,
TOKEN_NONBMAND,
TOKEN_MNTPOINT,
TOKEN_LAST,
};
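/* Map each mount option string to a token; MNTOPT_MNTPOINT carries a %s argument. */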
static const match_table_t zpl_tokens = {
{ TOKEN_RO, MNTOPT_RO },
{ TOKEN_RW, MNTOPT_RW },
{ TOKEN_SETUID, MNTOPT_SETUID },
{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
{ TOKEN_EXEC, MNTOPT_EXEC },
{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
{ TOKEN_DEVICES, MNTOPT_DEVICES },
{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
{ TOKEN_XATTR, MNTOPT_XATTR },
{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
{ TOKEN_ATIME, MNTOPT_ATIME },
{ TOKEN_NOATIME, MNTOPT_NOATIME },
{ TOKEN_RELATIME, MNTOPT_RELATIME },
{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
{ TOKEN_NBMAND, MNTOPT_NBMAND },
{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
{ TOKEN_LAST, NULL },
};
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
if (vfsp != NULL) {
if (vfsp->vfs_mntpoint != NULL)
kmem_strfree(vfsp->vfs_mntpoint);
kmem_free(vfsp, sizeof (vfs_t));
}
}
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
switch (token) {
case TOKEN_RO:
vfsp->vfs_readonly = B_TRUE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_RW:
vfsp->vfs_readonly = B_FALSE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_SETUID:
vfsp->vfs_setuid = B_TRUE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_NOSETUID:
vfsp->vfs_setuid = B_FALSE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_EXEC:
vfsp->vfs_exec = B_TRUE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_NOEXEC:
vfsp->vfs_exec = B_FALSE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_DEVICES:
vfsp->vfs_devices = B_TRUE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_NODEVICES:
vfsp->vfs_devices = B_FALSE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_DIRXATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_SAXATTR:
vfsp->vfs_xattr = ZFS_XATTR_SA;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_XATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_NOXATTR:
vfsp->vfs_xattr = ZFS_XATTR_OFF;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_ATIME:
vfsp->vfs_atime = B_TRUE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_NOATIME:
vfsp->vfs_atime = B_FALSE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_RELATIME:
vfsp->vfs_relatime = B_TRUE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NORELATIME:
vfsp->vfs_relatime = B_FALSE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NBMAND:
vfsp->vfs_nbmand = B_TRUE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_NONBMAND:
vfsp->vfs_nbmand = B_FALSE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_MNTPOINT:
vfsp->vfs_mntpoint = match_strdup(&args[0]);
if (vfsp->vfs_mntpoint == NULL)
return (SET_ERROR(ENOMEM));
break;
default:
break;
}
return (0);
}
/*
* Parse the raw mntopts and return a vfs_t describing the options.
*/
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
vfs_t *tmp_vfsp;
int error;
tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);
if (mntopts != NULL) {
substring_t args[MAX_OPT_ARGS];
char *tmp_mntopts, *p, *t;
int token;
tmp_mntopts = t = kmem_strdup(mntopts);
if (tmp_mntopts == NULL) {
/* Free the partially constructed vfs_t to avoid leaking it. */
zfsvfs_vfs_free(tmp_vfsp);
return (SET_ERROR(ENOMEM));
}
while ((p = strsep(&t, ",")) != NULL) {
if (!*p)
continue;
args[0].to = args[0].from = NULL;
token = match_token(p, zpl_tokens, args);
error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
if (error) {
kmem_strfree(tmp_mntopts);
zfsvfs_vfs_free(tmp_vfsp);
return (error);
}
}
kmem_strfree(tmp_mntopts);
}
*vfsp = tmp_vfsp;
return (0);
}
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
}
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
(void) cr;
zfsvfs_t *zfsvfs = sb->s_fs_info;
/*
* Semantically, the only requirement is that the sync be initiated.
* The DMU syncs out txgs frequently, so there's nothing to do.
*/
if (!wait)
return (0);
if (zfsvfs != NULL) {
/*
* Sync a specific filesystem.
*/
dsl_pool_t *dp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (spa_suspended(dp->dp_spa)) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
/*
* Update SB_NOATIME bit in VFS super block. Since atime update is
* determined by atime_needs_update(), atime_needs_update() needs to
* return false if atime is turned off, and not unconditionally return
* false if atime is turned on.
*/
if (newval)
sb->s_flags &= ~SB_NOATIME;
else
sb->s_flags |= SB_NOATIME;
}
static void
relatime_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_relatime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
acltype_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
switch (newval) {
case ZFS_ACLTYPE_NFSV4:
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
break;
case ZFS_ACLTYPE_POSIX:
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
#else
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
default:
break;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval)
sb->s_flags |= SB_RDONLY;
else
sb->s_flags &= ~SB_RDONLY;
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval == TRUE)
sb->s_flags |= SB_MANDLOCK;
else
sb->s_flags &= ~SB_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
int error = 0;
ASSERT(vfsp);
zfsvfs = vfsp->vfs_data;
ASSERT(zfsvfs);
os = zfsvfs->z_os;
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
vfsp->vfs_do_readonly = B_TRUE;
vfsp->vfs_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (vfsp->vfs_do_readonly)
readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
if (vfsp->vfs_do_setuid)
setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
if (vfsp->vfs_do_exec)
exec_changed_cb(zfsvfs, vfsp->vfs_exec);
if (vfsp->vfs_do_devices)
devices_changed_cb(zfsvfs, vfsp->vfs_devices);
if (vfsp->vfs_do_xattr)
xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
if (vfsp->vfs_do_atime)
atime_changed_cb(zfsvfs, vfsp->vfs_atime);
if (vfsp->vfs_do_relatime)
relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
if (vfsp->vfs_do_nbmand)
nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Takes a dataset, a property, a value and that value's setpoint as
* found in the ZAP. Checks if the property has been changed in the vfs.
* If so, val and setpoint will be overwritten with updated content.
* Otherwise, they are left unchanged.
*/
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS)
return (SET_ERROR(EINVAL));
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (zfvp == NULL)
return (SET_ERROR(ESRCH));
vfsp = zfvp->z_vfs;
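/*
 * Each vfs_do_* flag records whether the option was set at mount
 * time; when set, the temporary mount value overrides the ZAP value.
 */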
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfsp->vfs_do_atime)
tmp = vfsp->vfs_atime;
break;
case ZFS_PROP_RELATIME:
if (vfsp->vfs_do_relatime)
tmp = vfsp->vfs_relatime;
break;
case ZFS_PROP_DEVICES:
if (vfsp->vfs_do_devices)
tmp = vfsp->vfs_devices;
break;
case ZFS_PROP_EXEC:
if (vfsp->vfs_do_exec)
tmp = vfsp->vfs_exec;
break;
case ZFS_PROP_SETUID:
if (vfsp->vfs_do_setuid)
tmp = vfsp->vfs_setuid;
break;
case ZFS_PROP_READONLY:
if (vfsp->vfs_do_readonly)
tmp = vfsp->vfs_readonly;
break;
case ZFS_PROP_XATTR:
if (vfsp->vfs_do_xattr)
tmp = vfsp->vfs_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfsp->vfs_do_nbmand)
tmp = vfsp->vfs_nbmand;
break;
default:
return (SET_ERROR(ENOENT));
}
if (tmp != *val) {
if (setpoint)
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printk("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.\n", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if ((error == 0) && (val == ZFS_XATTR_SA))
zfsvfs->z_xattr_sa = B_TRUE;
}
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT(zfsvfs->z_root != 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
return (0);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
/*
* Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
* on a failure. Do not pass in a statically allocated zfsvfs.
*/
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_sb = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
ZFS_TEARDOWN_INIT(zfsvfs);
rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
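/*
 * Round zfs_object_mutex_size down to a power of two, capped at
 * ZFS_OBJ_MTX_MAX, to size the znode hold trees and their locks.
 */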
int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (int i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
zfsvfs_free(zfsvfs);
return (error);
}
zfsvfs->z_drain_task = TASKQID_INVALID;
zfsvfs->z_draining = B_FALSE;
zfsvfs->z_drain_cancel = B_TRUE;
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
boolean_t readonly = zfs_is_readonly(zfsvfs);
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
if (readonly != 0) {
readonly_changed_cb(zfsvfs, B_FALSE);
} else {
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
}
}
/* restore readonly bit */
if (readonly != 0)
readonly_changed_cb(zfsvfs, B_TRUE);
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i, size = zfsvfs->z_hold_size;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_teardown_inactive_lock);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
zfsvfs_vfs_free(zfsvfs->z_vfs);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
#ifdef HAVE_MLSLABEL
/*
* Check that the hex label string is appropriate for the dataset being
* mounted into the global_zone proper.
*
* Return an error if the hex label string is not default or
* admin_low/admin_high. For admin_low labels, the corresponding
* dataset must be readonly.
*/
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
/* must be readonly */
uint64_t rdonly;
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (SET_ERROR(EACCES));
return (rdonly ? 0 : SET_ERROR(EACCES));
}
return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
static int
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
uint32_t bshift)
{
char buf[20 + DMU_OBJACCT_PREFIX_LEN];
uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
uint64_t quota;
uint64_t used;
int err;
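/*
 * Build the ZAP key: DMU_OBJACCT_PREFIX followed by the project ID
 * rendered as a FUID string. The space-quota lookups below skip the
 * prefix (buf + offset); the object-count lookup uses the full key.
 */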
strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
sizeof (buf) - offset, B_FALSE);
if (err)
return (err);
if (zfsvfs->z_projectquota_obj == 0)
goto objs;
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
goto objs;
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf + offset, 8, 1, &used);
if (unlikely(err == ENOENT)) {
uint32_t blksize;
u_longlong_t nblocks;
/*
* Quota accounting is async, so a race is possible here:
* there is at least one object with the given project ID,
* but its space usage has not been accounted for yet.
*/
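/* Use this znode's own space usage as a conservative estimate. */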
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
if (unlikely(zp->z_blksz == 0))
blksize = zfsvfs->z_max_blksz;
used = blksize * nblocks;
} else if (err) {
return (err);
}
statp->f_blocks = quota >> bshift;
statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
statp->f_bavail = statp->f_bfree;
objs:
if (zfsvfs->z_projectobjquota_obj == 0)
return (0);
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
return (0);
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf, 8, 1, &used);
if (unlikely(err == ENOENT)) {
/*
* Quota accounting is async, so a race is possible here:
* there is at least one object with the given project ID,
* but it has not been accounted for yet.
*/
used = 1;
} else if (err) {
return (err);
}
statp->f_files = quota;
statp->f_ffree = (quota > used) ? (quota - used) : 0;
return (0);
}
int
zfs_statvfs(struct inode *ip, struct kstatfs *statp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int err = 0;
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
/*
* The underlying storage pool actually uses multiple block
* sizes. Under Solaris frsize (fragment size) is reported as
* the smallest block size we support, and bsize (block size)
* as the filesystem's maximum block size. Unfortunately,
* under Linux the fragment size and block size are often used
* interchangeably. Thus we are forced to report both of them
* as the filesystem's maximum block size.
*/
statp->f_frsize = zfsvfs->z_max_blksz;
statp->f_bsize = zfsvfs->z_max_blksz;
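/* fls(x) - 1 is floor(log2(x)); bshift converts byte counts to blocks. */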
uint32_t bshift = fls(statp->f_bsize) - 1;
/*
* The following report "total" blocks of various kinds in
* the file system, but reported in terms of f_bsize - the
* "preferred" size.
*/
/* Round up so we never have a filesystem using 0 blocks. */
refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
statp->f_blocks = (refdbytes + availbytes) >> bshift;
statp->f_bfree = availbytes >> bshift;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
statp->f_files = statp->f_ffree + usedobjs;
statp->f_fsid.val[0] = (uint32_t)fsid;
statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
statp->f_type = ZFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
/*
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
memset(statp->f_spare, 0, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {
znode_t *zp = ITOZ(ip);
if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
zpl_is_valid_projid(zp->z_projid))
err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
}
zfs_exit(zfsvfs, FTAG);
return (err);
}
static int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
znode_t *rootzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*ipp = ZTOI(rootzp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Linux kernels older than 3.1 do not support a per-filesystem shrinker.
* To accommodate this we must improvise and manually walk the list of znodes
* attempting to prune dentries in order to be able to drop the inodes.
*
* To avoid scanning the same znodes multiple times they are always rotated
* to the end of the z_all_znodes list. New znodes are inserted at the
* end of the list so we're always scanning the oldest znodes first.
*/
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
znode_t **zp_array, *zp;
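/* Scan at most nr_to_scan znodes, capped at eight pages worth of pointers. */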
int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
int objects = 0;
int i = 0, j = 0;
zp_array = vmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
mutex_enter(&zfsvfs->z_znodes_lock);
while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
if ((i++ > nr_to_scan) || (j >= max_array))
break;
ASSERT(list_link_active(&zp->z_link_node));
list_remove(&zfsvfs->z_all_znodes, zp);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
/* Skip active znodes and .zfs entries */
if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
continue;
if (igrab(ZTOI(zp)) == NULL)
continue;
zp_array[j] = zp;
j++;
}
mutex_exit(&zfsvfs->z_znodes_lock);
for (i = 0; i < j; i++) {
zp = zp_array[i];
ASSERT3P(zp, !=, NULL);
d_prune_aliases(ZTOI(zp));
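/* If our igrab() hold is now the only reference, the inode is freeable. */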
if (atomic_read(&ZTOI(zp)->i_count) == 1)
objects++;
zrele(zp);
}
vmem_free(zp_array, max_array * sizeof (znode_t *));
return (objects);
}
/*
* The ARC has requested that the filesystem drop entries from the dentry
* and inode caches. This can occur when the ARC needs to free metadata
* blocks but can't because they are all pinned by entries in these caches.
*/
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
int error = 0;
struct shrinker *shrinker = &sb->s_shrink;
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
.gfp_mask = GFP_KERNEL,
};
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
defined(SHRINK_CONTROL_HAS_NID) && \
defined(SHRINKER_NUMA_AWARE)
if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
/*
* reset sc.nr_to_scan, modified by
* scan_objects == super_cache_scan
*/
sc.nr_to_scan = nr_to_scan;
}
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define D_PRUNE_ALIASES_IS_DEFAULT
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif
#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
/*
* Fall back to zfs_prune_aliases if the kernel's per-superblock
* shrinker couldn't free anything, possibly due to the inodes being
* allocated in a different memcg.
*/
if (*objects == 0)
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif
zfs_exit(zfsvfs, FTAG);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"pruning, nr_to_scan=%lu objects=%d error=%d\n",
nr_to_scan, *objects, error);
return (error);
}
/*
* Teardown the zfsvfs_t.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
zfs_unlinked_drain_stop_wait(zfsvfs);
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but iputs run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's super block as the
* parent filesystem and all of its snapshots have their
* inode's super block set to the parent's filesystem's
* super block. Note, 'z_parent' is self referential
* for non-snapshots.
*/
shrink_dcache_sb(zfsvfs->z_parent->z_sb);
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
/*
* If we are not unmounting (i.e., online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed, then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no VFS ops active, and any new VFS ops
* will fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs. We also grab an extra reference to all
* the remaining inodes so that the kernel does not attempt to free
* any inodes of a suspended fs. This can cause deadlocks since the
* zfs_resume_fs() process may involve starting threads, which might
* attempt to free unreferenced inodes to free up memory for the new
* thread.
*/
if (!unmounting) {
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
if (igrab(ZTOI(zp)) != NULL)
zp->z_suspended = B_TRUE;
}
mutex_exit(&zfsvfs->z_znodes_lock);
}
/*
* If we are unmounting, set the unmounted flag and let new VFS ops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other VFS ops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
objset_t *os = zfsvfs->z_os;
boolean_t os_dirty = B_FALSE;
for (int t = 0; t < TXG_SIZE; t++) {
if (dmu_objset_is_dirty(os, t)) {
os_dirty = B_TRUE;
break;
}
}
if (!zfs_is_readonly(zfsvfs) && os_dirty) {
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
}
dmu_objset_evict_dbufs(zfsvfs->z_os);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
vfs_t *vfs = NULL;
int canwrite;
int dataset_visible_zone;
ASSERT(zm);
ASSERT(osname);
dataset_visible_zone = zone_dataset_visible(osname, &canwrite);
/*
* Refuse to mount a filesystem if we are in a namespace and the
* dataset is not visible or writable in that namespace.
*/
if (!INGLOBALZONE(curproc) &&
(!dataset_visible_zone || !canwrite)) {
return (SET_ERROR(EPERM));
}
error = zfsvfs_parse_options(zm->mnt_data, &vfs);
if (error)
return (error);
/*
* If a non-writable filesystem is being mounted without the
* read-only flag, pretend it was set, as done for snapshots.
*/
if (!canwrite)
vfs->vfs_readonly = true;
error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
if (error) {
zfsvfs_vfs_free(vfs);
goto out;
}
if ((error = dsl_prop_get_integer(osname, "recordsize",
&recordsize, NULL))) {
zfsvfs_vfs_free(vfs);
goto out;
}
vfs->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfs;
zfsvfs->z_sb = sb;
sb->s_fs_info = zfsvfs;
sb->s_magic = ZFS_SUPER_MAGIC;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
sb->s_blocksize = recordsize;
sb->s_blocksize_bits = ilog2(recordsize);
error = -zpl_bdi_setup(sb, "zfs");
if (error)
goto out;
sb->s_bdi->ra_pages = 0;
/* Set callback operations for the file system. */
sb->s_op = &zpl_super_operations;
sb->s_xattr = zpl_xattr_handlers;
sb->s_export_op = &zpl_export_operations;
/* Set features for file system. */
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acltype_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
zfsvfs->z_snap_defer_time = jiffies;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
/* Allocate a root inode for the filesystem. */
error = zfs_root(zfsvfs, &root_inode);
if (error) {
(void) zfs_umount(sb);
zfsvfs = NULL; /* avoid double-free; first in zfs_umount */
goto out;
}
/* Allocate a root dentry for the filesystem */
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL) {
(void) zfs_umount(sb);
zfsvfs = NULL; /* avoid double-free; first in zfs_umount */
error = SET_ERROR(ENOMEM);
goto out;
}
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
if (error) {
if (zfsvfs != NULL) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
/*
* Make sure we don't have a dangling sb->s_fs_info which
* zfs_preumount would use.
*/
sb->s_fs_info = NULL;
}
return (error);
}
/*
* Called when an unmount is requested and certain sanity checks have
* already passed. At this point no dentries or inodes have been reclaimed
* from their respective caches. We drop the extra reference on the .zfs
* control directory to allow everything to be reclaimed. All snapshots
* must already have been unmounted to reach this point.
*/
void
zfs_preumount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
/* zfsvfs is NULL when zfs_domount fails during mount */
if (zfsvfs) {
zfs_unlinked_drain_stop_wait(zfsvfs);
zfsctl_destroy(sb->s_fs_info);
/*
* Wait for zrele_async before entering evict_inodes in
* generic_shutdown_super. The reason we must finish before
* evict_inodes is that when lazytime is on, or when zfs_purgedir
* calls zfs_zget, zrele would bump i_count from 0 to 1. This
* would race with the i_count check in evict_inodes. This means
* it could destroy the inode while we are still using it.
*
* We wait for two passes. xattr directories in the first pass
* may add xattr entries in zfs_purgedir, so in the second pass
* we wait for them. We don't use taskq_wait here because it is
* a pool wide taskq. Other mounted filesystems can constantly
* do zrele_async and there's no guarantee when taskq will be
* empty.
*/
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
}
}
/*
* Called once all other unmount-related teardown has occurred.
* It is our responsibility to release any remaining infrastructure.
*/
int
zfs_umount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
objset_t *os;
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
os = zfsvfs->z_os;
zpl_bdi_destroy(sb);
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
zfsvfs_free(zfsvfs);
+ sb->s_fs_info = NULL;
return (0);
}
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
vfs_t *vfsp;
boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
int error;
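/* Snapshots and non-writable pools may only be remounted read-only. */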
if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
!(*flags & SB_RDONLY)) {
*flags |= SB_RDONLY;
return (EROFS);
}
error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
if (error)
return (error);
if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
zfs_unregister_callbacks(zfsvfs);
zfsvfs_vfs_free(zfsvfs->z_vfs);
vfsp->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfsp;
if (!issnap)
(void) zfs_register_callbacks(vfsp);
return (error);
}
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
znode_t *zp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*ipp = NULL;
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
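/* Both fid layouts begin with a zfid_short_t; decode the byte arrays. */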
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
return (SET_ERROR(EINVAL));
}
/* LONG_FID_LEN means snapdirs */
if (fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
uint64_t setgen = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
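/* Snapdir objects are numbered ZFSCTL_INO_SNAPDIRS - objsetid. */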
if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
dprintf("snapdir fid: objsetid (%llu) != "
"ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
objsetid, ZFSCTL_INO_SNAPDIRS, object);
return (SET_ERROR(EINVAL));
}
if (fid_gen > 1 || setgen != 0) {
dprintf("snapdir fid: fid_gen (%llu) and setgen "
"(%llu)\n", fid_gen, setgen);
return (SET_ERROR(EINVAL));
}
return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
}
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
/* A zero fid_gen means we are in the .zfs control directories */
if (fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
*ipp = zfsvfs->z_ctldir;
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
0, kcred, NULL, NULL) == 0);
} else {
/*
* Must have an existing ref, so igrab()
* cannot return NULL
*/
VERIFY3P(igrab(*ipp), !=, NULL);
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
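/* Mask covering only the generation bits actually stored in the fid. */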
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
/* Don't export xattr stuff */
if (zp->z_pflags & ZFS_XATTR) {
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if ((fid_gen == 0) && (zfsvfs->z_root == object))
fid_gen = zp_gen;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
fid_gen);
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
*ipp = ZTOI(zp);
if (*ipp)
zfs_znode_update_vfs(ITOZ(*ipp));
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Block out VFS ops and close zfsvfs_t
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err, err2;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
/*
* Attempt to re-establish all the active inodes with their
* dbufs. If a zfs_rezget() fails, then we unhash the inode
* and mark it stale. This prevents a collision if a new
* inode/object is created which must use the same inode
* number. The stale inode will be released when the
* VFS prunes the dentry holding the remaining references
* on the stale inode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
err2 = zfs_rezget(zp);
if (err2) {
zpl_d_drop_aliases(ZTOI(zp));
remove_inode_hash(ZTOI(zp));
}
/* see comment in zfs_suspend_fs() */
if (zp->z_suspended) {
zfs_zrele_async(zp);
zp->z_suspended = B_FALSE;
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
/*
* zfs_suspend_fs() could have interrupted freeing
* of dnodes. We need to restart this freeing so
* that we don't "leak" the space.
*/
zfs_unlinked_drain(zfsvfs);
}
/*
* Most of the time zfs_suspend_fs is used for changing the contents
* of the underlying dataset. ZFS rollback and receive operations
* might create files for which negative dentries are present in
* the cache. Since walking the dcache would require a lot of GPL-only
* code duplication, it's much easier on these rather rare occasions
* just to flush the whole dcache for the given dataset/filesystem.
*/
shrink_dcache_sb(zfsvfs->z_sb);
bail:
if (err != 0)
zfsvfs->z_unmounted = B_TRUE;
/* release the VFS ops */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err != 0) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (zfsvfs->z_os)
(void) zfs_umount(zfsvfs->z_sb);
}
return (err);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_sb);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
/*
* Automounted snapshots rely on periodic revalidation
* to keep snapshots from being automatically unmounted.
*/
inline void
zfs_exit_fs(zfsvfs_t *zfsvfs)
{
if (!zfsvfs->z_issnap)
return;
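/*
 * Re-arm the deferred unmount at most once per half expiry interval,
 * and never more often than once per second (HZ).
 */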
if (time_after(jiffies, zfsvfs->z_snap_defer_time +
MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
zfsvfs->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
dmu_objset_id(zfsvfs->z_os),
zfs_expire_snapshot);
}
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
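/* The tx is already assigned, so it must be committed, not aborted. */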
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY(0 == sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %llu to %llu", zfsvfs->z_version, newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_unmounted)
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
/*
* We don't need to do anything here; the devname is always current by
* virtue of zfsvfs->z_sb->s_op->show_devname.
*/
(void) oldname, (void) newname;
}
void
zfs_init(void)
{
zfsctl_init();
zfs_znode_init();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
register_filesystem(&zpl_fs_type);
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+ register_fo_extend(&zpl_file_operations);
+#endif
}
void
zfs_fini(void)
{
/*
* We don't use taskq_wait_outstanding() here because
* zpl_posix_acl_free might add more work.
*/
taskq_wait(system_delay_taskq);
taskq_wait(system_taskq);
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+ unregister_fo_extend(&zpl_file_operations);
+#endif
unregister_filesystem(&zpl_fs_type);
zfs_znode_fini();
zfsctl_fini();
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
index 02b1af3edc4f..335ae3460c58 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
@@ -1,2352 +1,2360 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* Functions needed for userland (i.e., libzpool) are not put under
* #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
/*
* This is used by the test suite so that it can delay znodes from being
* freed in order to inspect the unlinked set.
*/
static int zfs_unlink_suspend_progress = 0;
/*
* This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
* z_rangelock. It will modify the offset and length of the lock to reflect
* znode-specific information, and convert RL_APPEND to RL_WRITER. This is
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
/*
* If in append mode, convert to writer and lock starting at the
* current end of file.
*/
if (new->lr_type == RL_APPEND) {
new->lr_offset = zp->z_size;
new->lr_type = RL_WRITER;
}
/*
* If we need to grow the block size then lock the whole file range.
*/
uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->lr_offset = 0;
new->lr_length = UINT64_MAX;
}
}
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
(void) arg, (void) kmflags;
znode_t *zp = buf;
inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
zp->z_dirlocks = NULL;
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_t *zp = buf;
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
rw_destroy(&zp->z_name_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}
static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
(void) arg, (void) kmflags;
znode_hold_t *zh = buf;
mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
zh->zh_refcount = 0;
return (0);
}
static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_hold_t *zh = buf;
mutex_destroy(&zh->zh_lock);
}
void
zfs_znode_init(void)
{
/*
* Initialize zcache. The KMC_SLAB hint is used so that the cache
* is backed by kmalloc() when on the Linux slab, which ensures any
* wait_on_bit() operations on the related inode operate properly.
*/
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
ASSERT(znode_hold_cache == NULL);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}
void
zfs_znode_fini(void)
{
/*
* Cleanup zcache
*/
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
if (znode_hold_cache)
kmem_cache_destroy(znode_hold_cache);
znode_hold_cache = NULL;
}
/*
* The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
* serialize access to a znode and its SA buffer while the object is being
* created or destroyed. This kind of locking would normally reside in the
* znode itself but in this case that's impossible because the znode and SA
* buffer may not yet exist. Therefore the locking is handled externally
* with an array of mutexes and AVL trees which contain per-object locks.
*
* In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
* in to the correct AVL tree and finally the per-object lock is held. In
* zfs_znode_hold_exit() the process is reversed. The per-object lock is
* released, removed from the AVL tree and destroyed if there are no waiters.
*
* This scheme has two important properties:
*
* 1) No memory allocations are performed while holding one of the z_hold_locks.
* This ensures evict(), which can be called from direct memory reclaim, will
* never block waiting on a z_hold_locks entry which just happens to
* have hashed to the same index.
*
* 2) All locks used to serialize access to an object are per-object and never
* shared. This minimizes lock contention without creating a large number
* of dedicated locks.
*
* On the downside it does require znode_hold_t structures to be frequently
* allocated and freed. However, because these are backed by a kmem cache
* and very short lived this cost is minimal.
*/
int
zfs_znode_hold_compare(const void *a, const void *b)
{
const znode_hold_t *zh_a = (const znode_hold_t *)a;
const znode_hold_t *zh_b = (const znode_hold_t *)b;
return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t held;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
mutex_exit(&zfsvfs->z_hold_locks[i]);
return (held);
}
znode_hold_t *
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, *zh_new, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t found = B_FALSE;
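/*
 * Preallocate the hold before taking z_hold_locks[i]; no memory
 * allocation may occur while the hash lock is held (see above).
 */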
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) {
zh = zh_new;
zh->zh_obj = obj;
avl_add(&zfsvfs->z_hold_trees[i], zh);
} else {
ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE;
}
zh->zh_refcount++;
ASSERT3S(zh->zh_refcount, >, 0);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new);
ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
mutex_enter(&zh->zh_lock);
return (zh);
}
void
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
mutex_exit(&zh->zh_lock);
mutex_enter(&zfsvfs->z_hold_locks[i]);
ASSERT3S(zh->zh_refcount, >, 0);
if (--zh->zh_refcount == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (remove == B_TRUE)
kmem_cache_free(znode_hold_cache, zh);
}
dev_t
zfs_cmpldev(uint64_t dev)
{
return (dev);
}
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
sa_set_userp(sa_hdl, zp);
}
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
mutex_exit(&zp->z_lock);
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) ||
RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
* Called by new_inode() to allocate a new inode.
*/
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
znode_t *zp;
zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
*ip = ZTOI(zp);
return (0);
}
/*
* Called in multiple places when an inode should be destroyed.
*/
void
zfs_inode_destroy(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
mutex_enter(&zfsvfs->z_znodes_lock);
if (list_link_active(&zp->z_link_node)) {
list_remove(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes--;
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
kmem_cache_free(znode_cache, zp);
}
static void
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
uint64_t rdev = 0;
switch (ip->i_mode & S_IFMT) {
case S_IFREG:
ip->i_op = &zpl_inode_operations;
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+ ip->i_fop = &zpl_file_operations.kabi_fops;
+#else
ip->i_fop = &zpl_file_operations;
+#endif
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
case S_IFDIR:
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
ip->i_flags |= S_IOPS_WRAPPER;
ip->i_op = &zpl_dir_inode_operations.ops;
#else
ip->i_op = &zpl_dir_inode_operations;
#endif
ip->i_fop = &zpl_dir_file_operations;
ITOZ(ip)->z_zn_prefetch = B_TRUE;
break;
case S_IFLNK:
ip->i_op = &zpl_symlink_inode_operations;
break;
/*
* rdev is stored in the SA only for device files.
*/
case S_IFCHR:
case S_IFBLK:
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
sizeof (rdev));
zfs_fallthrough;
case S_IFIFO:
case S_IFSOCK:
init_special_inode(ip, ip->i_mode, rdev);
ip->i_op = &zpl_special_inode_operations;
break;
default:
zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
(u_longlong_t)ip->i_ino, ip->i_mode);
/* Assume the inode is a file and attempt to continue */
ip->i_mode = S_IFREG | 0644;
ip->i_op = &zpl_inode_operations;
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+ ip->i_fop = &zpl_file_operations.kabi_fops;
+#else
ip->i_fop = &zpl_file_operations;
+#endif
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
}
}
static void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
/*
* Linux and Solaris have different sets of file attributes, so we
* restrict this conversion to the intersection of the two.
*/
#ifdef HAVE_INODE_SET_FLAGS
unsigned int flags = 0;
if (zp->z_pflags & ZFS_IMMUTABLE)
flags |= S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
flags |= S_APPEND;
inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
#else
if (zp->z_pflags & ZFS_IMMUTABLE)
ip->i_flags |= S_IMMUTABLE;
else
ip->i_flags &= ~S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
ip->i_flags |= S_APPEND;
else
ip->i_flags &= ~S_APPEND;
#endif
}
/*
* Update the embedded inode given the znode.
*/
void
zfs_znode_update_vfs(znode_t *zp)
{
struct inode *ip;
uint32_t blksize;
u_longlong_t i_blocks;
ASSERT(zp != NULL);
ip = ZTOI(zp);
/* Skip .zfs control nodes which do not exist on disk. */
if (zfsctl_is_node(ip))
return;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
}
/*
* Construct a znode+inode and initialize.
*
* This does not call dmu_set_user(); that is up to
* the caller to do, in case you don't want to
* return the znode.
*/
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, sa_handle_t *hdl)
{
znode_t *zp;
struct inode *ip;
uint64_t mode;
uint64_t parent;
uint64_t tmp_gen;
uint64_t links;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[12];
int count = 0;
ASSERT(zfsvfs != NULL);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
zp->z_is_mapped = B_FALSE;
#endif
zp->z_is_ctldir = B_FALSE;
zp->z_suspended = B_FALSE;
zp->z_sa_hdl = NULL;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
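/* z_seq magic: 0x7A4653 is "zFS" in ASCII. */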
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
(dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
(zp->z_pflags & ZFS_PROJID) &&
sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
goto error;
}
zp->z_projid = projid;
zp->z_mode = ip->i_mode = mode;
ip->i_generation = (uint32_t)tmp_gen;
ip->i_blkbits = SPA_MINBLOCKSHIFT;
set_nlink(ip, (uint32_t)links);
zfs_uid_write(ip, z_uid);
zfs_gid_write(ip, z_gid);
zfs_set_inode_flags(zp, ip);
/* Cache the xattr parent id */
if (zp->z_pflags & ZFS_XATTR)
zp->z_xattr_parent = parent;
ZFS_TIME_DECODE(&ip->i_atime, atime);
ZFS_TIME_DECODE(&ip->i_mtime, mtime);
ZFS_TIME_DECODE(&ip->i_ctime, ctime);
ZFS_TIME_DECODE(&zp->z_btime, btime);
ip->i_ino = zp->z_id;
zfs_znode_update_vfs(zp);
zfs_inode_set_ops(zfsvfs, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
* number is already hashed for this super block. This can never
* happen because the inode numbers map 1:1 with the object numbers.
*
* Exceptions include rolling back a mounted file system, either
* from the zfs rollback or zfs recv command.
*
* Active inodes are unhashed during the rollback, but since zrele
* can happen asynchronously, we can't guarantee they've been
* unhashed. This can cause hash collisions in unlinked drain
* processing so do not hash unlinked znodes.
*/
if (links > 0)
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
mutex_exit(&zfsvfs->z_znodes_lock);
if (links > 0)
unlock_new_inode(ip);
return (zp);
error:
iput(ip);
return (NULL);
}
/*
* Safely mark an inode dirty. Inodes which are part of a read-only
* file system or snapshot may not be dirtied.
*/
void
zfs_mark_inode_dirty(struct inode *ip)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return;
mark_inode_dirty(ip);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* vap - file attributes for new znode
* tx - dmu transaction id for zap operations
* cr - credentials of caller
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_TMPFILE - new object is of O_TMPFILE
* IS_XATTR - new object is an attribute
* acl_ids - ACL related attributes
*
* OUT: zpp - allocated znode (set to dzp if IS_ROOT_NODE)
*
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
uint64_t projid = ZFS_DEFAULT_PROJID;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
inode_timespec_t now;
uint64_t gen, obj;
int bonuslen;
int dnodesize;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
znode_hold_t *zh;
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
dnodesize = vap->va_fsid; /* ditto */
} else {
obj = 0;
gethrestime(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
* be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
if (S_ISDIR(vap->va_mode)) {
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
zh = zfs_znode_hold_enter(zfsvfs, obj);
VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
}
/*
* If parent is an xattr, so am I.
*/
if (dzp->z_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = 2;
} else {
size = 0;
links = (flag & IS_TMPFILE) ? 0 : 1;
}
if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = vap->va_rdev;
parent = dzp->z_id;
mode = acl_ids->z_mode;
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
/*
* With ZFS_PROJID flag, we can easily know whether there is
* project ID stored on disk or not. See zfs_space_delta_cb().
*/
if (obj_type != DMU_OT_ZNODE &&
dmu_objset_projectquota_enabled(zfsvfs->z_os))
pflags |= ZFS_PROJID;
/*
* Inherit project ID from parent if required.
*/
projid = zfs_inherit_projid(dzp);
if (dzp->z_pflags & ZFS_PROJINHERIT)
pflags |= ZFS_PROJINHERIT;
}
/*
* No execs denied will be determined when zfs_mode_compute() is called.
*/
pflags |= acl_ids->z_aclp->z_hints &
(ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* Set up the array of attributes to be replaced/set on the new file.
*
* The order for DMU_OT_ZNODE is critical since it needs to be
* constructed in the old znode_phys_t format. Don't change this ordering.
*/
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
} else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
pflags & ZFS_PROJID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
NULL, &projid, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
acl_ids->z_fuid, acl_ids->z_fgid);
}
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
/*
* The call to zfs_znode_alloc() may fail if memory is low
* via the call path: alloc_inode() -> inode_init_always() ->
* security_inode_alloc() -> inode_alloc_security(). Since
* the existing code is written such that zfs_mknode() cannot
* fail, retry until sufficient memory has been reclaimed.
*/
do {
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
} while (*zpp == NULL);
VERIFY(*zpp != NULL);
VERIFY(dzp != NULL);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
(*zpp)->z_sa_hdl = sa_hdl;
}
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
zfs_znode_hold_exit(zfsvfs, zh);
}
/*
* Update in-core attributes. It is assumed the caller will be doing an
* sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
boolean_t update_inode = B_FALSE;
xoap = xva_getxoptattr(xvap);
ASSERT(xoap);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
&times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (update_inode)
zfs_set_inode_flags(zp, ZTOI(zp));
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
znode_hold_t *zh;
int err;
sa_handle_t *hdl;
*zpp = NULL;
again:
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
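/*
 * The check below accepts either an SA bonus buffer, or a legacy
 * DMU_OT_ZNODE bonus large enough to hold a znode_phys_t; anything
 * else is not a valid ZPL object.
 */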
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
* should never find a sa handle that doesn't
* know about the znode.
*/
ASSERT3P(zp, !=, NULL);
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
/*
* If zp->z_unlinked is set, the znode is already marked
* for deletion and should not be discovered. Check this
* after checking igrab() due to fsetxattr() & O_TMPFILE.
*
* If igrab() returns NULL the VFS has independently
* determined the inode should be evicted and has
* called iput_final() to start the eviction process.
* The SA handle is still valid but because the VFS
* requires that the eviction succeed we must drop
* our locks and references to allow the eviction to
* complete. The zfs_zget() may then be retried.
*
* This unlikely case could be optimized by registering
* a sops->drop_inode() callback. The callback would
* need to detect the active SA hold thereby informing
* the VFS that this inode should not be evicted.
*/
if (igrab(ZTOI(zp)) == NULL) {
if (zp->z_unlinked)
err = SET_ERROR(ENOENT);
else
err = SET_ERROR(EAGAIN);
} else {
*zpp = zp;
err = 0;
}
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
if (err == EAGAIN) {
/* inode might need this to finish evict */
cond_resched();
goto again;
}
return (err);
}
/*
* Not found; create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* progress. This is checked for in zfs_znode_alloc().
*
* If zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
uint64_t mode;
uint64_t links;
sa_bulk_attr_t bulk[11];
int err;
int count = 0;
uint64_t gen;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
znode_hold_t *zh;
/*
* Skip ctldir znodes, otherwise they will always get invalidated. This
* causes odd behaviour for mounted snapdirs. In particular, on
* Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
* anyone from automounting it again as long as someone is still using
* the detached mount.
*/
if (zp->z_is_ctldir)
return (0);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
mutex_exit(&zp->z_acl_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
rw_exit(&zp->z_xattr_lock);
ASSERT(zp->z_sa_hdl == NULL);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&z_uid, sizeof (z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&z_gid, sizeof (z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
&projid, 8);
if (err != 0 && err != ENOENT) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(err));
}
}
zp->z_projid = projid;
zp->z_mode = ZTOI(zp)->i_mode = mode;
zfs_uid_write(ZTOI(zp), z_uid);
zfs_gid_write(ZTOI(zp), z_gid);
ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_ctime, ctime);
ZFS_TIME_DECODE(&zp->z_btime, btime);
if ((uint32_t)gen != ZTOI(zp)->i_generation) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
set_nlink(ZTOI(zp), (uint32_t)links);
zfs_set_inode_flags(zp, ZTOI(zp));
zp->z_blksz = doi.doi_data_block_size;
zp->z_atime_dirty = B_FALSE;
zfs_znode_update_vfs(zp);
/*
* If the file has zero links, then it has been unlinked on the send
* side and it must be in the received unlinked set.
* We call zfs_znode_dmu_fini() now to prevent any accesses to the
* stale data and to prevent automatic removal of the file in
* zfs_zinactive(). The file will be removed either when it is removed
* on the send side and the next incremental stream is received or
* when the unlinked set gets processed.
*/
zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
if (zp->z_unlinked)
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
znode_hold_t *zh;
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
znode_hold_t *zh;
ASSERT(zp->z_sa_hdl);
/*
* Don't allow a zfs_zget() while we're trying to release this znode.
*/
zh = zfs_znode_hold_enter(zfsvfs, z_id);
mutex_enter(&zp->z_lock);
/*
* If this was the last reference to a file with no links, remove
* the file from the file system unless the file system is mounted
* read-only. That can happen, for example, if the file system was
* originally read-write, the file was opened, then unlinked and
* the file system was made read-only before the file was finally
* closed. The file will remain in the unlinked set.
*/
if (zp->z_unlinked) {
ASSERT(!zfsvfs->z_issnap);
if (!zfs_is_readonly(zfsvfs) && !zfs_unlink_suspend_progress) {
mutex_exit(&zp->z_lock);
zfs_znode_hold_exit(zfsvfs, zh);
zfs_rmnode(zp);
return;
}
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zfs_compare_timespec timespec64_compare
#else
#define zfs_compare_timespec timespec_compare
#endif
/*
* Determine whether the znode's atime must be updated. The logic mostly
* duplicates the Linux kernel's relatime_need_update() functionality.
* This function is only called if the underlying filesystem actually has
* atime updates enabled.
*/
boolean_t
zfs_relatime_need_update(const struct inode *ip)
{
inode_timespec_t now;
gethrestime(&now);
/*
* In relatime mode, only update the atime if the previous atime
* is earlier than either the ctime or mtime or if at least a day
* has passed since the last update of atime.
*/
if (zfs_compare_timespec(&ip->i_mtime, &ip->i_atime) >= 0)
return (B_TRUE);
if (zfs_compare_timespec(&ip->i_ctime, &ip->i_atime) >= 0)
return (B_TRUE);
if ((hrtime_t)now.tv_sec - (hrtime_t)ip->i_atime.tv_sec >= 24*60*60)
return (B_TRUE);
return (B_FALSE);
}
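/*
 * Illustrative example: with atime last set at t = 1000s, an mtime of
 * 1500s satisfies the first comparison and forces an update. If both
 * mtime and ctime are older than atime, no update happens until
 * now - atime reaches 24*60*60 seconds, i.e. one day later.
 */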
/*
* Prepare to update znode time stamps.
*
* IN: zp - znode requiring timestamp update
* flag - ATTR_MTIME, ATTR_CTIME flags
*
* OUT: zp - z_seq
* mtime - new mtime
* ctime - new ctime
*
* Note: We don't update atime here, because we rely on the Linux VFS
* to update atime.
*/
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2])
{
inode_timespec_t now;
gethrestime(&now);
zp->z_seq++;
if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_ctime), ctime);
if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
/*
* Grow the block size for a file.
*
* IN: zp - znode of the file whose block size is to be grown.
* size - requested block size
* tx - open transaction.
*
* NOTE: this function assumes that the znode is write locked.
*/
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
int error;
u_longlong_t dummy;
if (size <= zp->z_blksz)
return;
/*
* If the file size is already greater than the current blocksize,
* we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
/*
* Increase the file length
*
* IN: zp - znode of the file to extend.
* end - new end-of-file
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end <= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
} else {
newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
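/*
 * Worked example for the branch above (illustrative): with
 * recordsize=128K, extending a file whose z_blksz is 4K to end=200K
 * selects newblksz = MIN(200K, 128K) = 128K, and dmu_tx_hold_write()
 * reserves enough space to rewrite the first block at its new size.
 */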
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
zp->z_size = end;
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
dmu_tx_commit(tx);
return (0);
}
/*
* zfs_zero_partial_page - Modeled after update_pages() but
* with different arguments and semantics for use by zfs_freesp().
*
* Zeroes a piece of a single page cache entry for zp at offset
* start and length len.
*
* Caller must acquire a range lock on the file for the region
* being zeroed in order that the ARC and page cache stay in sync.
*/
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
struct address_space *mp = ZTOI(zp)->i_mapping;
struct page *pp;
int64_t off;
void *pb;
ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
off = start & (PAGE_SIZE - 1);
start &= PAGE_MASK;
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
memset(pb + off, 0, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
}
/*
* Free space in a file.
*
* IN: zp - znode of file to free data in.
* off - start of section to free.
* len - length of section to free.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_locked_range_t *lr;
int error;
/*
* Lock the range being freed.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (off >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
/*
* Zero partial page cache entries. This must be done under a
* range lock in order to keep the ARC and page cache in sync.
*/
if (zn_has_cached_data(zp, off, off + len - 1)) {
loff_t first_page, last_page, page_len;
loff_t first_page_offset, last_page_offset;
/* first possible full page in hole */
first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* last page of hole */
last_page = (off + len) >> PAGE_SHIFT;
/* offset of first_page */
first_page_offset = first_page << PAGE_SHIFT;
/* offset of last_page */
last_page_offset = last_page << PAGE_SHIFT;
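/*
 * Worked example (illustrative, assuming 4K pages): off=6144 and
 * len=12288 yield first_page=2 and last_page=4, so the whole pages
 * spanning [8192, 16383] are truncated below, while the partial head
 * [6144, 8191] and tail [16384, 18431] are zeroed in place.
 */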
/* truncate whole pages */
if (last_page_offset > first_page_offset) {
truncate_inode_pages_range(ZTOI(zp)->i_mapping,
first_page_offset, last_page_offset - 1);
}
/* truncate sub-page ranges */
if (first_page > last_page) {
/* entire punched area within a single page */
zfs_zero_partial_page(zp, off, len);
} else {
/* beginning of punched area at the end of a page */
page_len = first_page_offset - off;
if (page_len > 0)
zfs_zero_partial_page(zp, off, page_len);
/* end of punched area at the beginning of a page */
page_len = off + len - last_page_offset;
if (page_len > 0)
zfs_zero_partial_page(zp, last_page_offset,
page_len);
}
}
zfs_rangelock_exit(lr);
return (error);
}
/*
* Truncate a file
*
* IN: zp - znode of the file to truncate.
* end - new end-of-file.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
DMU_OBJECT_END);
if (error) {
zfs_rangelock_exit(lr);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
return (0);
}
/*
* Free space in a file
*
* IN: zp - znode of file to free data in.
* off - start of range
* len - length of range (0 => to EOF)
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
* RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
goto out;
}
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
goto out;
log:
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
error = 0;
out:
/*
* Truncate the page cache - for file truncate operations, use
* the purpose-built API for truncations. For punching operations,
* the truncation is handled under a range lock in zfs_free_range.
*/
if (len == 0)
truncate_setsize(ZTOI(zp), off);
return (error);
}
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
struct super_block *sb;
zfsvfs_t *zfsvfs;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
nvpair_t *elem;
int size;
int error;
int i;
znode_t *rootzp = NULL;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
/*
* First attempt to create master node.
*/
/*
* In an empty objset, there are no blocks to read and thus
* there can be no i/o errors (which we assert below).
*/
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
/*
* Set starting attributes.
*/
version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
uint64_t val;
const char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
VERIFY(nvpair_value_uint64(elem, &val) == 0);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT(error == 0);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val;
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
ASSERT(error == 0);
/*
* Create zap object used for SA attribute registration
*/
if (version >= ZPL_VERSION_SA) {
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT(error == 0);
} else {
sa_obj = 0;
}
/*
* Create a delete queue.
*/
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
* to allow zfs_mknode to work.
*/
vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
sb->s_fs_info = zfsvfs;
ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
ASSERT(error == 0);
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, zfs_init_idmap));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
sa_handle_destroy(rootzp->z_sa_hdl);
kmem_cache_free(znode_cache, rootzp);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
mutex_destroy(&zfsvfs->z_znodes_lock);
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
kmem_free(sb, sizeof (struct super_block));
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
uint64_t sa_obj = 0;
int error;
error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
if (error != 0 && error != ENOENT)
return (error);
error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
dmu_buf_t **db, const void *tag)
{
dmu_object_info_t doi;
int error;
if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
sa_buf_rele(*db, tag);
return (error);
}
return (0);
}
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
{
sa_handle_destroy(hdl);
sa_buf_rele(db, tag);
}
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
uint64_t *pobjp, int *is_xattrdir)
{
uint64_t parent;
uint64_t pflags;
uint64_t mode;
uint64_t parent_mode;
sa_bulk_attr_t bulk[3];
sa_handle_t *sa_hdl;
dmu_buf_t *sa_db;
int count = 0;
int error;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
&parent, sizeof (parent));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
&pflags, sizeof (pflags));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&mode, sizeof (mode));
if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
return (error);
/*
* When a link is removed its parent pointer is not changed and will
* be invalid. There are two cases where a link is removed but the
* file stays around: when it goes to the delete queue and when there
* are additional links.
*/
error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
if (error != 0)
return (error);
error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
if (error != 0)
return (error);
*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
/*
* Extended attributes can be applied to files, directories, etc.
* Otherwise the parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (SET_ERROR(EINVAL));
*pobjp = parent;
return (0);
}
/*
* Given an object number, return some zpl level statistics
*/
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
zfs_stat_t *sb)
{
sa_bulk_attr_t bulk[4];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&sb->zs_mode, sizeof (sb->zs_mode));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
&sb->zs_gen, sizeof (sb->zs_gen));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
&sb->zs_links, sizeof (sb->zs_links));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
&sb->zs_ctime, sizeof (sb->zs_ctime));
return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
sa_attr_type_t *sa_table, char *buf, int len)
{
sa_handle_t *sa_hdl;
sa_handle_t *prevhdl = NULL;
dmu_buf_t *prevdb = NULL;
dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
sa_hdl = hdl;
uint64_t deleteq_obj;
VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
error = zap_lookup_int(osp, deleteq_obj, obj);
if (error == 0) {
return (ESTALE);
} else if (error != ENOENT) {
return (error);
}
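/*
 * The path is assembled right to left: each pass through the loop
 * below prepends "/<component>" ahead of the current position, so for
 * /a/b/c the tail of the buffer fills as "/c", then "/b/c", then
 * "/a/b/c", terminating once an object is its own parent (the root).
 */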
for (;;) {
uint64_t pobj = 0;
char component[MAXNAMELEN + 2];
size_t complen;
int is_xattrdir = 0;
if (prevdb) {
ASSERT(prevhdl != NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG);
}
if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
if (pobj == obj) {
if (path[0] != '/')
*--path = '/';
break;
}
component[0] = '/';
if (is_xattrdir) {
strcpy(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
if (error != 0)
break;
}
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {
prevhdl = sa_hdl;
prevdb = sa_db;
}
error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
break;
}
}
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
return (error);
}
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
char *buf, int len)
{
char *path = buf + len - 1;
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
*path = '\0';
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
/*
* Read a property stored within the master node.
*/
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
uint64_t *cached_copy = NULL;
/*
* Figure out where in the objset_t the cached copy would live, if it
* is available for the requested property.
*/
if (os != NULL) {
switch (prop) {
case ZFS_PROP_VERSION:
cached_copy = &os->os_version;
break;
case ZFS_PROP_NORMALIZE:
cached_copy = &os->os_normalization;
break;
case ZFS_PROP_UTF8ONLY:
cached_copy = &os->os_utf8only;
break;
case ZFS_PROP_CASE:
cached_copy = &os->os_casesensitivity;
break;
default:
break;
}
}
if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
*value = *cached_copy;
return (0);
}
/*
* If the property wasn't cached, look up the file system's value for
* the property. For the version property, we look up a slightly
* different string.
*/
const char *pname;
int error = ENOENT;
if (prop == ZFS_PROP_VERSION)
pname = ZPL_VERSION_STR;
else
pname = zfs_prop_to_name(prop);
if (os != NULL) {
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
}
if (error == ENOENT) {
/* No value set, use the default value */
switch (prop) {
case ZFS_PROP_VERSION:
*value = ZPL_VERSION;
break;
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
*value = 0;
break;
case ZFS_PROP_CASE:
*value = ZFS_CASE_SENSITIVE;
break;
case ZFS_PROP_ACLTYPE:
*value = ZFS_ACLTYPE_OFF;
break;
default:
return (error);
}
error = 0;
}
/*
* If one of the methods for getting the property value above worked,
* copy it into the objset_t's cache.
*/
if (error == 0 && cached_copy != NULL) {
*cached_copy = *value;
}
return (error);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
module_param(zfs_unlink_suspend_progress, int, 0644);
MODULE_PARM_DESC(zfs_unlink_suspend_progress, "Set to prevent async unlinks "
"(debug - leaks space into the unlinked set)");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index e690525d3cd4..73526db731c4 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -1,1365 +1,1392 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <linux/fs.h>
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#if defined(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS) || \
defined(HAVE_VFS_FILEMAP_DIRTY_FOLIO)
#include <linux/pagemap.h>
#endif
#ifdef HAVE_FILE_FADVISE
#include <linux/fadvise.h>
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
#include <linux/writeback.h>
#endif
/*
* When using fallocate(2) to preallocate space, inflate the requested
* capacity check by 10% to account for the required metadata blocks.
*/
static unsigned int zfs_fallocate_reserve_percent = 110;
static int
zpl_open(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
error = generic_file_open(ip, filp);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
if (ITOZ(ip)->z_atime_dirty)
zfs_mark_inode_dirty(ip);
crhold(cr);
error = -zfs_close(ip, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_readdir(file_inode(filp), ctx, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
* Linux 2.6.35 - 3.0 API,
* As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
* redundant. The dentry is still accessible via filp->f_path.dentry,
* and we are guaranteed that filp will never be NULL.
*/
static int
zpl_fsync(struct file *filp, int datasync)
{
struct inode *inode = filp->f_mapping->host;
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(ITOZ(inode), datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif
#elif defined(HAVE_FSYNC_RANGE)
/*
* Linux 3.1 API,
* As of 3.1 the responsibility to call filemap_write_and_wait_range() has
* been pushed down into the .fsync() vfs hook. Additionally, the i_mutex
* lock is no longer held by the caller. For zfs we don't require the lock
* to be held, so we don't acquire it.
*/
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
znode_t *zp = ITOZ(inode);
zfsvfs_t *zfsvfs = ITOZSB(inode);
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
/*
* The variables z_sync_writes_cnt and z_async_writes_cnt work in
* tandem so that sync writes can detect if there are any non-sync
* writes going on and vice-versa. The "vice-versa" part to this logic
* is located in zfs_putpage() where non-sync writes check if there are
* any ongoing sync writes. If any sync and non-sync writes overlap,
* we do a commit to complete the non-sync writes since the latter can
* potentially take several seconds to complete and thus block sync
* writes in the upcoming call to filemap_write_and_wait_range().
*/
atomic_inc_32(&zp->z_sync_writes_cnt);
/*
* If the following check does not detect an overlapping non-sync write
* (say because it's just about to start), then it is guaranteed that
* the non-sync write will detect this sync write. This is because we
* always increment z_sync_writes_cnt / z_async_writes_cnt before doing
* the check on z_async_writes_cnt / z_sync_writes_cnt here and in
* zfs_putpage() respectively.
*/
if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
if ((error = zpl_enter(zfsvfs, FTAG)) != 0) {
atomic_dec_32(&zp->z_sync_writes_cnt);
return (error);
}
zil_commit(zfsvfs->z_log, zp->z_id);
zpl_exit(zfsvfs, FTAG);
}
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
/*
* The sync write is not complete yet but we decrement
* z_sync_writes_cnt since zfs_fsync() increments and decrements
* it internally. If a non-sync write starts just after the decrement
* operation but before we call zfs_fsync(), it may not detect this
* overlapping sync write but it does not matter since we have already
* gone past filemap_write_and_wait_range() and we won't block due to
* the non-sync write.
*/
atomic_dec_32(&zp->z_sync_writes_cnt);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(zp, datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif
#else
#error "Unsupported fops->fsync() implementation"
#endif
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
int flags = 0;
#if defined(IOCB_DSYNC)
if (kiocb->ki_flags & IOCB_DSYNC)
flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
if (kiocb->ki_flags & IOCB_SYNC)
flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
if (kiocb->ki_flags & IOCB_APPEND)
flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
if (kiocb->ki_flags & IOCB_DIRECT)
flags |= O_DIRECT;
#endif
return (flags);
}
/*
* If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
* is true. This is needed since datasets with an inherited "relatime"
* property aren't necessarily mounted with the MNT_RELATIME flag (e.g. after
* `zfs set relatime=...`), which is what the VFS relatime test in
* relatime_need_update() is based on.
*/
static inline void
zpl_file_accessed(struct file *filp)
{
struct inode *ip = filp->f_mapping->host;
if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
if (zfs_relatime_need_update(ip))
file_accessed(filp);
} else {
file_accessed(filp);
}
}
#if defined(HAVE_VFS_RW_ITERATE)
/*
* When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
* iovecs, kvecs, bvecs and pipes, plus all the required interfaces to
* manipulate the iov_iter are available. In which case the full iov_iter
* can be attached to the uio and correctly handled in the lower layers.
* Otherwise, for older kernels extract the iovec and pass it instead.
*/
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
#ifdef HAVE_IOV_ITER_TYPE
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#else
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#endif
#endif
}
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
ssize_t count = iov_iter_count(to);
zfs_uio_t uio;
zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
ssize_t ret = generic_write_checks(kiocb, from);
if (ret <= 0)
return (ret);
*countp = ret;
#else
struct file *file = kiocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *ip = mapping->host;
int isblk = S_ISBLK(ip->i_mode);
*countp = iov_iter_count(from);
ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
if (ret)
return (ret);
#endif
return (0);
}
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
zfs_uio_t uio;
size_t count = 0;
ssize_t ret;
ret = zpl_generic_write_checks(kiocb, from, &count);
if (ret)
return (ret);
zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return (ret);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
return (ret);
ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
if (ret)
return (ret);
kiocb->ki_pos = pos;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
if (rw == WRITE)
return (zpl_iter_write(kiocb, iter));
else
return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif
#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
if (rw == WRITE)
return (zpl_aio_write(kiocb, iov, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
const struct iovec *iovp = iov_iter_iovec(iter);
unsigned long nr_segs = iter->nr_segs;
ASSERT3S(pos, ==, kiocb->ki_pos);
if (rw == WRITE)
return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif
#endif /* HAVE_VFS_RW_ITERATE */
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
fstrans_cookie_t cookie;
if (whence == SEEK_DATA || whence == SEEK_HOLE) {
struct inode *ip = filp->f_mapping->host;
loff_t maxbytes = ip->i_sb->s_maxbytes;
loff_t error;
spl_inode_lock_shared(ip);
cookie = spl_fstrans_mark();
error = -zfs_holey(ITOZ(ip), whence, &offset);
spl_fstrans_unmark(cookie);
if (error == 0)
error = lseek_execute(filp, ip, offset, maxbytes);
spl_inode_unlock_shared(ip);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
return (generic_file_llseek(filp, offset, whence));
}
/*
* It's worth taking a moment to describe how mmap is implemented
* for zfs because it differs considerably from other Linux filesystems.
* However, this issue is handled the same way under OpenSolaris.
*
* The issue is that by design zfs bypasses the Linux page cache and
* leaves all caching up to the ARC. This has been shown to work
* well for the common read(2)/write(2) case. However, mmap(2)
* is a problem because it relies on being tightly integrated with the
* page cache. To handle this we cache mmap'ed files twice, once in
* the ARC and a second time in the page cache. The code is careful
* to keep both copies synchronized.
*
* When a file with an mmap'ed region is written to using write(2)
* both the data in the ARC and existing pages in the page cache
* are updated. For a read(2) data will be read first from the page
* cache then the ARC if needed. Neither a write(2) nor a read(2)
* will ever result in new pages being added to the page cache.
*
* New pages are added to the page cache only via .readpage() which
* is called when the vfs needs to read a page off disk to back the
* virtual memory region. These pages may be modified without
* notifying the ARC and will be written out periodically via
* .writepage(). This will occur due to either a sync or the usual
* page aging behavior. Note that because a read(2) of an mmap'ed file
* will always check the page cache first, correct data will still be
* returned even when the ARC is out of date.
*
* While this implementation ensures correct behavior it does
* have some drawbacks. The most obvious of these is that it
* increases the required memory footprint when accessing mmap'ed
* files. It also adds additional complexity to the code that keeps
* both caches synchronized.
*
* Longer term it may be possible to cleanly resolve this wart by
* mapping page cache pages directly on to the ARC buffers. The
* Linux address space operations are flexible enough to allow
* selection of which pages back a particular index. The trick
* would be working out the details of which subsystem is in
* charge, the ARC, the page cache, or both. It may also prove
* helpful to move the ARC buffers to scatter-gather lists
* rather than a vmalloc'ed region.
*/
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct inode *ip = filp->f_mapping->host;
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
(size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
spl_fstrans_unmark(cookie);
if (error)
return (error);
error = generic_file_mmap(filp, vma);
if (error)
return (error);
#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
znode_t *zp = ITOZ(ip);
mutex_enter(&zp->z_lock);
zp->z_is_mapped = B_TRUE;
mutex_exit(&zp->z_lock);
#endif
return (error);
}
/*
* Populate a page with data for the Linux page cache. This function is
* only used to support mmap(2). There will be an identical copy of the
* data in the ARC which is kept up to date via .write() and .writepage().
*/
static inline int
zpl_readpage_common(struct page *pp)
{
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
cookie = spl_fstrans_mark();
int error = -zfs_getpage(pp->mapping->host, pp);
spl_fstrans_unmark(cookie);
unlock_page(pp);
return (error);
}
#ifdef HAVE_VFS_READ_FOLIO
static int
zpl_read_folio(struct file *filp, struct folio *folio)
{
return (zpl_readpage_common(&folio->page));
}
#else
static int
zpl_readpage(struct file *filp, struct page *pp)
{
return (zpl_readpage_common(pp));
}
#endif
static int
zpl_readpage_filler(void *data, struct page *pp)
{
return (zpl_readpage_common(pp));
}
/*
* Populate a set of pages with data for the Linux page cache. This
* function will only be called for read ahead and never for demand
* paging. For simplicity, the code relies on read_cache_pages() to
* correctly lock each page for IO and call zpl_readpage().
*/
#ifdef HAVE_VFS_READPAGES
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
#else
static void
zpl_readahead(struct readahead_control *ractl)
{
struct page *page;
while ((page = readahead_page(ractl)) != NULL) {
int ret;
ret = zpl_readpage_filler(NULL, page);
put_page(page);
if (ret)
break;
}
}
#endif
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
boolean_t *for_sync = data;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
cookie = spl_fstrans_mark();
(void) zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
spl_fstrans_unmark(cookie);
return (0);
}
#ifdef HAVE_WRITEPAGE_T_FOLIO
static int
zpl_putfolio(struct folio *pp, struct writeback_control *wbc, void *data)
{
(void) zpl_putpage(&pp->page, wbc, data);
return (0);
}
#endif
static inline int
zpl_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, void *data)
{
int result;
#ifdef HAVE_WRITEPAGE_T_FOLIO
result = write_cache_pages(mapping, wbc, zpl_putfolio, data);
#else
result = write_cache_pages(mapping, wbc, zpl_putpage, data);
#endif
return (result);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(mapping->host);
zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
enum writeback_sync_modes sync_mode;
int result;
if ((result = zpl_enter(zfsvfs, FTAG)) != 0)
return (result);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
zpl_exit(zfsvfs, FTAG);
sync_mode = wbc->sync_mode;
/*
* We don't want to run write_cache_pages() in SYNC mode here, because
* that would make putpage() wait for a single page to be committed to
* disk every single time, resulting in atrocious performance. Instead
* we run it once in non-SYNC mode so that the ZIL gets all the data,
* and then we commit it all in one go.
*/
boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
wbc->sync_mode = WB_SYNC_NONE;
result = zpl_write_cache_pages(mapping, wbc, &for_sync);
if (sync_mode != wbc->sync_mode) {
if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (result);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
zpl_exit(zfsvfs, FTAG);
/*
* We need to call write_cache_pages() again (we can't just
* return after the commit) because the previous call in
* non-SYNC mode does not guarantee that we got all the dirty
* pages (see the implementation of write_cache_pages() for
* details). That being said, this is a no-op in most cases.
*/
wbc->sync_mode = sync_mode;
result = zpl_write_cache_pages(mapping, wbc, &for_sync);
}
return (result);
}
/*
* Write out dirty pages to the ARC; this function is only required to
* support mmap(2). Mapped pages may be dirtied by memory operations
* which never call .write(). These dirty pages are kept in sync with
* the ARC buffers via this hook.
*/
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);
return (zpl_putpage(pp, wbc, &for_sync));
}
/*
* The flag combination which matches the behavior of zfs_space() is
* FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
* flag was introduced in the 2.6.38 kernel.
*
* The original mode=0 (allocate space) behavior can be reasonably emulated
* by checking if enough space exists and creating a sparse file, as real
* persistent space reservation is not possible due to COW, snapshots, etc.
*/
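/*
 * An illustrative userspace call that exercises the hole-punching path
 * handled here (fd is an open file descriptor):
 *
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *     offset, length);
 */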
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
cred_t *cr = CRED();
loff_t olen;
fstrans_cookie_t cookie;
int error = 0;
int test_mode = FALLOC_FL_PUNCH_HOLE;
#ifdef HAVE_FALLOC_FL_ZERO_RANGE
test_mode |= FALLOC_FL_ZERO_RANGE;
#endif
if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
return (-EOPNOTSUPP);
if (offset < 0 || len <= 0)
return (-EINVAL);
spl_inode_lock(ip);
olen = i_size_read(ip);
crhold(cr);
cookie = spl_fstrans_mark();
if (mode & (test_mode)) {
flock64_t bf;
if (mode & FALLOC_FL_KEEP_SIZE) {
if (offset > olen)
goto out_unmark;
if (offset + len > olen)
len = olen - offset;
}
bf.l_type = F_WRLCK;
bf.l_whence = SEEK_SET;
bf.l_start = offset;
bf.l_len = len;
bf.l_pid = 0;
error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
unsigned int percent = zfs_fallocate_reserve_percent;
struct kstatfs statfs;
/* Legacy mode, disable fallocate compatibility. */
if (percent == 0) {
error = -EOPNOTSUPP;
goto out_unmark;
}
/*
* Use zfs_statvfs() instead of dmu_objset_space() since it
* also checks project quota limits, which are relevant here.
*/
error = zfs_statvfs(ip, &statfs);
if (error)
goto out_unmark;
/*
* Shrink available space a bit to account for overhead/races.
* We know the product previously fit into availbytes from
* dmu_objset_space(), so the smaller product will also fit.
*/
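/*
 * E.g. with the default zfs_fallocate_reserve_percent=110 and a 4K
 * f_bsize, each available block is counted as 4096 * 100 / 110 = 3723
 * bytes below, so a request must fit within roughly 91% of the space
 * reported by zfs_statvfs() to be allowed to proceed.
 */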
if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
error = -ENOSPC;
goto out_unmark;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
}
out_unmark:
spl_fstrans_unmark(cookie);
spl_inode_unlock(ip);
crfree(cr);
return (error);
}
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
return zpl_fallocate_common(file_inode(filp),
mode, offset, len);
}
static int
zpl_ioctl_getversion(struct file *filp, void __user *arg)
{
uint32_t generation = file_inode(filp)->i_generation;
return (copy_to_user(arg, &generation, sizeof (generation)));
}
#ifdef HAVE_FILE_FADVISE
static int
zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
{
struct inode *ip = file_inode(filp);
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os = zfsvfs->z_os;
int error = 0;
if (S_ISFIFO(ip->i_mode))
return (-ESPIPE);
if (offset < 0 || len < 0)
return (-EINVAL);
if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
switch (advice) {
case POSIX_FADV_SEQUENTIAL:
case POSIX_FADV_WILLNEED:
#ifdef HAVE_GENERIC_FADVISE
if (zn_has_cached_data(zp, offset, offset + len - 1))
error = generic_fadvise(filp, offset, len, advice);
#endif
/*
* Pass on the caller's size directly, but note that
* dmu_prefetch_max will effectively cap it. If there
* really is a larger sequential access pattern, perhaps
* dmu_zfetch will detect it.
*/
if (len == 0)
len = i_size_read(ip) - offset;
dmu_prefetch(os, zp->z_id, 0, offset, len,
ZIO_PRIORITY_ASYNC_READ);
break;
case POSIX_FADV_NORMAL:
case POSIX_FADV_RANDOM:
case POSIX_FADV_DONTNEED:
case POSIX_FADV_NOREUSE:
/* ignored for now */
break;
default:
error = -EINVAL;
break;
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
#endif /* HAVE_FILE_FADVISE */
#define ZFS_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define ZFS_FL_USER_MODIFIABLE (FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)
static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
uint32_t ioctl_flags = 0;
if (zfs_flags & ZFS_IMMUTABLE)
ioctl_flags |= FS_IMMUTABLE_FL;
if (zfs_flags & ZFS_APPENDONLY)
ioctl_flags |= FS_APPEND_FL;
if (zfs_flags & ZFS_NODUMP)
ioctl_flags |= FS_NODUMP_FL;
if (zfs_flags & ZFS_PROJINHERIT)
ioctl_flags |= ZFS_PROJINHERIT_FL;
return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}
/*
* Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
* attributes common to both Linux and Solaris are mapped.
*/
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
uint32_t flags;
int err;
flags = __zpl_ioctl_getflags(file_inode(filp));
err = copy_to_user(arg, &flags, sizeof (flags));
return (err);
}
/*
* fchange() is a helper macro to detect if we have been asked to change a
* flag. This is ugly, but the requirement that we do this is a consequence of
* how the Linux file attribute interface was designed. Another consequence is
* that concurrent modification of files suffers from a TOCTOU race. Neither
* are things we can fix without modifying the kernel-userland interface, which
* is outside of our jurisdiction.
*/
#define fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
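/*
 * Example: if ioctl_flags has FS_IMMUTABLE_FL set while zfs_flags
 * lacks ZFS_IMMUTABLE, then fchange(ioctl_flags, zfs_flags,
 * FS_IMMUTABLE_FL, ZFS_IMMUTABLE) evaluates to 1, and the
 * CAP_LINUX_IMMUTABLE check below applies.
 */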
static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
xoptattr_t *xoap;
if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
ZFS_PROJINHERIT_FL))
return (-EOPNOTSUPP);
if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
return (-EACCES);
if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
!capable(CAP_LINUX_IMMUTABLE))
return (-EPERM);
if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (-EACCES);
xva_init(xva);
xoap = xva_getxoptattr(xva);
#define FLAG_CHANGE(iflag, zflag, xflag, xfield) do { \
if (((ioctl_flags & (iflag)) && !(zfs_flags & (zflag))) || \
((zfs_flags & (zflag)) && !(ioctl_flags & (iflag)))) { \
XVA_SET_REQ(xva, (xflag)); \
(xfield) = ((ioctl_flags & (iflag)) != 0); \
} \
} while (0)
FLAG_CHANGE(FS_IMMUTABLE_FL, ZFS_IMMUTABLE, XAT_IMMUTABLE,
xoap->xoa_immutable);
FLAG_CHANGE(FS_APPEND_FL, ZFS_APPENDONLY, XAT_APPENDONLY,
xoap->xoa_appendonly);
FLAG_CHANGE(FS_NODUMP_FL, ZFS_NODUMP, XAT_NODUMP,
xoap->xoa_nodump);
FLAG_CHANGE(ZFS_PROJINHERIT_FL, ZFS_PROJINHERIT, XAT_PROJINHERIT,
xoap->xoa_projinherit);
#undef FLAG_CHANGE
return (0);
}
static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint32_t flags;
cred_t *cr = CRED();
xvattr_t xva;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&flags, arg, sizeof (flags)))
return (-EFAULT);
err = __zpl_ioctl_setflags(ip, flags, &xva);
if (err)
return (err);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
zfsxattr_t fsx = { 0 };
struct inode *ip = file_inode(filp);
int err;
fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
fsx.fsx_projid = ITOZ(ip)->z_projid;
err = copy_to_user(arg, &fsx, sizeof (fsx));
return (err);
}
static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
zfsxattr_t fsx;
cred_t *cr = CRED();
xvattr_t xva;
xoptattr_t *xoap;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&fsx, arg, sizeof (fsx)))
return (-EFAULT);
if (!zpl_is_valid_projid(fsx.fsx_projid))
return (-EINVAL);
err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
if (err)
return (err);
xoap = xva_getxoptattr(&xva);
XVA_SET_REQ(&xva, XAT_PROJID);
xoap->xoa_projid = fsx.fsx_projid;
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
/*
* Expose Additional File Level Attributes of ZFS.
*/
static int
zpl_ioctl_getdosflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint64_t dosflags = ITOZ(ip)->z_pflags;
dosflags &= ZFS_DOS_FL_USER_VISIBLE;
int err = copy_to_user(arg, &dosflags, sizeof (dosflags));
return (err);
}
static int
__zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
xoptattr_t *xoap;
if (ioctl_flags & (~ZFS_DOS_FL_USER_VISIBLE))
return (-EOPNOTSUPP);
if ((fchange(ioctl_flags, zfs_flags, ZFS_IMMUTABLE, ZFS_IMMUTABLE) ||
fchange(ioctl_flags, zfs_flags, ZFS_APPENDONLY, ZFS_APPENDONLY)) &&
!capable(CAP_LINUX_IMMUTABLE))
return (-EPERM);
if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (-EACCES);
xva_init(xva);
xoap = xva_getxoptattr(xva);
#define FLAG_CHANGE(iflag, xflag, xfield) do { \
if (((ioctl_flags & (iflag)) && !(zfs_flags & (iflag))) || \
((zfs_flags & (iflag)) && !(ioctl_flags & (iflag)))) { \
XVA_SET_REQ(xva, (xflag)); \
(xfield) = ((ioctl_flags & (iflag)) != 0); \
} \
} while (0)
FLAG_CHANGE(ZFS_IMMUTABLE, XAT_IMMUTABLE, xoap->xoa_immutable);
FLAG_CHANGE(ZFS_APPENDONLY, XAT_APPENDONLY, xoap->xoa_appendonly);
FLAG_CHANGE(ZFS_NODUMP, XAT_NODUMP, xoap->xoa_nodump);
FLAG_CHANGE(ZFS_READONLY, XAT_READONLY, xoap->xoa_readonly);
FLAG_CHANGE(ZFS_HIDDEN, XAT_HIDDEN, xoap->xoa_hidden);
FLAG_CHANGE(ZFS_SYSTEM, XAT_SYSTEM, xoap->xoa_system);
FLAG_CHANGE(ZFS_ARCHIVE, XAT_ARCHIVE, xoap->xoa_archive);
FLAG_CHANGE(ZFS_NOUNLINK, XAT_NOUNLINK, xoap->xoa_nounlink);
FLAG_CHANGE(ZFS_REPARSE, XAT_REPARSE, xoap->xoa_reparse);
FLAG_CHANGE(ZFS_OFFLINE, XAT_OFFLINE, xoap->xoa_offline);
FLAG_CHANGE(ZFS_SPARSE, XAT_SPARSE, xoap->xoa_sparse);
#undef FLAG_CHANGE
return (0);
}
/*
* Set Additional File Level Attributes of ZFS.
*/
static int
zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint64_t dosflags;
cred_t *cr = CRED();
xvattr_t xva;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&dosflags, arg, sizeof (dosflags)))
return (-EFAULT);
err = __zpl_ioctl_setdosflags(ip, dosflags, &xva);
if (err)
return (err);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC_GETVERSION:
return (zpl_ioctl_getversion(filp, (void *)arg));
case FS_IOC_GETFLAGS:
return (zpl_ioctl_getflags(filp, (void *)arg));
case FS_IOC_SETFLAGS:
return (zpl_ioctl_setflags(filp, (void *)arg));
case ZFS_IOC_FSGETXATTR:
return (zpl_ioctl_getxattr(filp, (void *)arg));
case ZFS_IOC_FSSETXATTR:
return (zpl_ioctl_setxattr(filp, (void *)arg));
case ZFS_IOC_GETDOSFLAGS:
return (zpl_ioctl_getdosflags(filp, (void *)arg));
case ZFS_IOC_SETDOSFLAGS:
return (zpl_ioctl_setdosflags(filp, (void *)arg));
+ case ZFS_IOC_COMPAT_FICLONE:
+ return (zpl_ioctl_ficlone(filp, (void *)arg));
+ case ZFS_IOC_COMPAT_FICLONERANGE:
+ return (zpl_ioctl_ficlonerange(filp, (void *)arg));
+ case ZFS_IOC_COMPAT_FIDEDUPERANGE:
+ return (zpl_ioctl_fideduperange(filp, (void *)arg));
default:
return (-ENOTTY);
}
}
#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
break;
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
default:
return (-ENOTTY);
}
return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */
-
const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_READPAGES
.readpages = zpl_readpages,
#else
.readahead = zpl_readahead,
#endif
#ifdef HAVE_VFS_READ_FOLIO
.read_folio = zpl_read_folio,
#else
.readpage = zpl_readpage,
#endif
.writepage = zpl_writepage,
.writepages = zpl_writepages,
.direct_IO = zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
.set_page_dirty = __set_page_dirty_nobuffers,
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
.dirty_folio = filemap_dirty_folio,
#endif
};
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+const struct file_operations_extend zpl_file_operations = {
+ .kabi_fops = {
+#else
const struct file_operations zpl_file_operations = {
+#endif
.open = zpl_open,
.release = zpl_release,
.llseek = zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
.read = new_sync_read,
.write = new_sync_write,
#endif
.read_iter = zpl_iter_read,
.write_iter = zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
#endif
#else
.read = do_sync_read,
.write = do_sync_write,
.aio_read = zpl_aio_read,
.aio_write = zpl_aio_write,
#endif
.mmap = zpl_mmap,
.fsync = zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
.aio_fsync = zpl_aio_fsync,
#endif
.fallocate = zpl_fallocate,
+#ifdef HAVE_VFS_COPY_FILE_RANGE
+ .copy_file_range = zpl_copy_file_range,
+#endif
+#ifdef HAVE_VFS_CLONE_FILE_RANGE
+ .clone_file_range = zpl_clone_file_range,
+#endif
+#ifdef HAVE_VFS_REMAP_FILE_RANGE
+ .remap_file_range = zpl_remap_file_range,
+#endif
+#ifdef HAVE_VFS_DEDUPE_FILE_RANGE
+ .dedupe_file_range = zpl_dedupe_file_range,
+#endif
#ifdef HAVE_FILE_FADVISE
.fadvise = zpl_fadvise,
#endif
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
+#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
+ }, /* kabi_fops */
+ .copy_file_range = zpl_copy_file_range,
+ .clone_file_range = zpl_clone_file_range,
+#endif
};
const struct file_operations zpl_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
.iterate_shared = zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_iterate,
#else
.readdir = zpl_readdir,
#endif
.fsync = zpl_fsync,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
/* CSTYLED */
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
"Percentage of length to use for the available capacity check");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c
new file mode 100644
index 000000000000..2abbf44df587
--- /dev/null
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c
@@ -0,0 +1,272 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2023, Klara Inc.
+ */
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <linux/fs.h>
+#include <sys/file.h>
+#include <sys/zfs_znode.h>
+#include <sys/zfs_vnops.h>
+#include <sys/zfeature.h>
+
+/*
+ * Clone part of a file via block cloning.
+ *
+ * Note that we are not required to update file offsets; the kernel will take
+ * care of that depending on how it was called.
+ */
+static ssize_t
+__zpl_clone_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, size_t len)
+{
+ struct inode *src_i = file_inode(src_file);
+ struct inode *dst_i = file_inode(dst_file);
+ uint64_t src_off_o = (uint64_t)src_off;
+ uint64_t dst_off_o = (uint64_t)dst_off;
+ uint64_t len_o = (uint64_t)len;
+ cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
+ int err;
+
+ if (!spa_feature_is_enabled(
+ dmu_objset_spa(ITOZSB(dst_i)->z_os), SPA_FEATURE_BLOCK_CLONING))
+ return (-EOPNOTSUPP);
+
+ if (src_i != dst_i)
+ spl_inode_lock_shared(src_i);
+ spl_inode_lock(dst_i);
+
+ crhold(cr);
+ cookie = spl_fstrans_mark();
+
+ err = -zfs_clone_range(ITOZ(src_i), &src_off_o, ITOZ(dst_i),
+ &dst_off_o, &len_o, cr);
+
+ spl_fstrans_unmark(cookie);
+ crfree(cr);
+
+ spl_inode_unlock(dst_i);
+ if (src_i != dst_i)
+ spl_inode_unlock_shared(src_i);
+
+ if (err < 0)
+ return (err);
+
+ return ((ssize_t)len_o);
+}
+
+#if defined(HAVE_VFS_COPY_FILE_RANGE) || \
+ defined(HAVE_VFS_FILE_OPERATIONS_EXTEND)
+/*
+ * Entry point for copy_file_range(). Copy len bytes from src_off in src_file
+ * to dst_off in dst_file. We are permitted to do this however we like, so we
+ * try to just clone the blocks, and if we can't support it, fall back to the
+ * kernel's generic byte copy function.
+ */
+ssize_t
+zpl_copy_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, size_t len, unsigned int flags)
+{
+ ssize_t ret;
+
+ if (flags != 0)
+ return (-EINVAL);
+
+ /* Try to do it via zfs_clone_range() */
+ ret = __zpl_clone_file_range(src_file, src_off,
+ dst_file, dst_off, len);
+
+#ifdef HAVE_VFS_GENERIC_COPY_FILE_RANGE
+ /*
+ * Since Linux 5.3 the filesystem driver is responsible for executing
+ * an appropriate fallback, and a generic fallback function is provided.
+ */
+ if (ret == -EOPNOTSUPP || ret == -EINVAL || ret == -EXDEV ||
+ ret == -EAGAIN)
+ ret = generic_copy_file_range(src_file, src_off, dst_file,
+ dst_off, len, flags);
+#else
+ /*
+ * Before Linux 5.3 the filesystem has to return -EOPNOTSUPP to signal
+	 * to the kernel that it should fall back to a content copy.
+ */
+ if (ret == -EINVAL || ret == -EXDEV || ret == -EAGAIN)
+ ret = -EOPNOTSUPP;
+#endif /* HAVE_VFS_GENERIC_COPY_FILE_RANGE */
+
+ return (ret);
+}
+#endif /* HAVE_VFS_COPY_FILE_RANGE || HAVE_VFS_FILE_OPERATIONS_EXTEND */
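+/*
+ * Illustrative sketch (not part of this change): the fallback chain is
+ * transparent to userland; a portable caller only needs to handle the
+ * pre-5.3 case where the error is propagated, e.g.:
+ *
+ *	ssize_t n = copy_file_range(src_fd, NULL, dst_fd, NULL, len, 0);
+ *	if (n < 0 && (errno == EOPNOTSUPP || errno == EXDEV)) {
+ *		... fall back to a plain read(2)/write(2) copy loop ...
+ *	}
+ */
+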
+
+#ifdef HAVE_VFS_REMAP_FILE_RANGE
+/*
+ * Entry point for FICLONE/FICLONERANGE/FIDEDUPERANGE.
+ *
+ * FICLONE and FICLONERANGE are basically the same as copy_file_range(), except
+ * that they must clone - they cannot fall back to copying. FICLONE is exactly
+ * FICLONERANGE, for the entire file. We don't need to try to tell them apart;
+ * the kernel will sort that out for us.
+ *
+ * FIDEDUPERANGE is for turning a non-clone into a clone, that is, compare the
+ * range in both files and if they're the same, arrange for them to be backed
+ * by the same storage.
+ */
+loff_t
+zpl_remap_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, loff_t len, unsigned int flags)
+{
+ if (flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN))
+ return (-EINVAL);
+
+ /*
+ * REMAP_FILE_CAN_SHORTEN lets us know we can clone less than the given
+	 * range if we want. It's designed for filesystems that make data past
+ * EOF available, and don't want it to be visible in both files. ZFS
+ * doesn't do that, so we just turn the flag off.
+ */
+ flags &= ~REMAP_FILE_CAN_SHORTEN;
+
+ if (flags & REMAP_FILE_DEDUP)
+ /* No support for dedup yet */
+ return (-EOPNOTSUPP);
+
+ /* Zero length means to clone everything to the end of the file */
+ if (len == 0)
+ len = i_size_read(file_inode(src_file)) - src_off;
+
+ return (__zpl_clone_file_range(src_file, src_off,
+ dst_file, dst_off, len));
+}
+#endif /* HAVE_VFS_REMAP_FILE_RANGE */
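+/*
+ * Illustrative sketch (not part of this change): this entry point is
+ * reached through the standard Linux cloning interfaces, e.g.:
+ *
+ *	ioctl(dst_fd, FICLONE, src_fd);
+ *
+ *	struct file_clone_range fcr = {
+ *		.src_fd = src_fd, .src_offset = 0,
+ *		.src_length = len, .dest_offset = 0,
+ *	};
+ *	ioctl(dst_fd, FICLONERANGE, &fcr);
+ */
+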
+
+#if defined(HAVE_VFS_CLONE_FILE_RANGE) || \
+ defined(HAVE_VFS_FILE_OPERATIONS_EXTEND)
+/*
+ * Entry point for FICLONE and FICLONERANGE, before Linux 4.20.
+ */
+int
+zpl_clone_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, uint64_t len)
+{
+ /* Zero length means to clone everything to the end of the file */
+ if (len == 0)
+ len = i_size_read(file_inode(src_file)) - src_off;
+
+ return (__zpl_clone_file_range(src_file, src_off,
+ dst_file, dst_off, len));
+}
+#endif /* HAVE_VFS_CLONE_FILE_RANGE || HAVE_VFS_FILE_OPERATIONS_EXTEND */
+
+#ifdef HAVE_VFS_DEDUPE_FILE_RANGE
+/*
+ * Entry point for FIDEDUPERANGE, before Linux 4.20.
+ */
+int
+zpl_dedupe_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, uint64_t len)
+{
+ /* No support for dedup yet */
+ return (-EOPNOTSUPP);
+}
+#endif /* HAVE_VFS_DEDUPE_FILE_RANGE */
+
+/* Entry point for FICLONE, before Linux 4.5. */
+long
+zpl_ioctl_ficlone(struct file *dst_file, void *arg)
+{
+ unsigned long sfd = (unsigned long)arg;
+
+ struct file *src_file = fget(sfd);
+ if (src_file == NULL)
+ return (-EBADF);
+
+	if (dst_file->f_op != src_file->f_op) {
+		fput(src_file);
+		return (-EXDEV);
+	}
+
+ size_t len = i_size_read(file_inode(src_file));
+
+ ssize_t ret =
+ __zpl_clone_file_range(src_file, 0, dst_file, 0, len);
+
+ fput(src_file);
+
+ if (ret < 0) {
+ if (ret == -EOPNOTSUPP)
+ return (-ENOTTY);
+ return (ret);
+ }
+
+ if (ret != len)
+ return (-EINVAL);
+
+ return (0);
+}
+
+/* Entry point for FICLONERANGE, before Linux 4.5. */
+long
+zpl_ioctl_ficlonerange(struct file *dst_file, void __user *arg)
+{
+ zfs_ioc_compat_file_clone_range_t fcr;
+
+ if (copy_from_user(&fcr, arg, sizeof (fcr)))
+ return (-EFAULT);
+
+ struct file *src_file = fget(fcr.fcr_src_fd);
+ if (src_file == NULL)
+ return (-EBADF);
+
+	if (dst_file->f_op != src_file->f_op) {
+		fput(src_file);
+		return (-EXDEV);
+	}
+
+ size_t len = fcr.fcr_src_length;
+ if (len == 0)
+ len = i_size_read(file_inode(src_file)) - fcr.fcr_src_offset;
+
+ ssize_t ret = __zpl_clone_file_range(src_file, fcr.fcr_src_offset,
+ dst_file, fcr.fcr_dest_offset, len);
+
+ fput(src_file);
+
+ if (ret < 0) {
+ if (ret == -EOPNOTSUPP)
+ return (-ENOTTY);
+ return (ret);
+ }
+
+ if (ret != len)
+ return (-EINVAL);
+
+ return (0);
+}
+
+/* Entry point for FIDEDUPERANGE, before Linux 4.5. */
+long
+zpl_ioctl_fideduperange(struct file *filp, void *arg)
+{
+ (void) arg;
+
+ /* No support for dedup yet */
+ return (-ENOTTY);
+}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
index c5c230bee144..ad52a11aada0 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
@@ -1,402 +1,411 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2023, Datto Inc. All rights reserved.
*/
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>
static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
struct inode *ip;
VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
inode_set_iversion(ip, 1);
return (ip);
}
static void
zpl_inode_destroy(struct inode *ip)
{
ASSERT(atomic_read(&ip->i_count) == 0);
zfs_inode_destroy(ip);
}
/*
* Called from __mark_inode_dirty() to reflect that something in the
* inode has changed. We use it to ensure the znode system attributes
* are always strictly up to date with respect to the inode.
*/
#ifdef HAVE_DIRTY_INODE_WITH_FLAGS
static void
zpl_dirty_inode(struct inode *ip, int flags)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
zfs_dirty_inode(ip, flags);
spl_fstrans_unmark(cookie);
}
#else
static void
zpl_dirty_inode(struct inode *ip)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
zfs_dirty_inode(ip, 0);
spl_fstrans_unmark(cookie);
}
#endif /* HAVE_DIRTY_INODE_WITH_FLAGS */
/*
* When ->drop_inode() is called its return value indicates if the
* inode should be evicted from the inode cache. If the inode is
* unhashed and has no links the default policy is to evict it
* immediately.
*
* The ->evict_inode() callback must minimally truncate the inode pages,
* and call clear_inode(). For 2.6.35 and later kernels this will
* simply update the inode state, with the sync occurring before the
* truncate in evict(). For earlier kernels clear_inode() maps to
* end_writeback() which is responsible for completing all outstanding
* write back. In either case, once this is done it is safe to cleanup
* any remaining inode specific data via zfs_inactive().
*/
static void
zpl_evict_inode(struct inode *ip)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
truncate_setsize(ip, 0);
clear_inode(ip);
zfs_inactive(ip);
spl_fstrans_unmark(cookie);
}
static void
zpl_put_super(struct super_block *sb)
{
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_umount(sb);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
}
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
int error;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_sync(sb, wait, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_statvfs(dentry->d_inode, statp);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
/*
* If required by a 32-bit system call, dynamically scale the
* block size up to 16MiB and decrease the block counts. This
* allows for a maximum size of 64PiB to be reported. The file
* counts must be artificially capped at 2^32-1.
*/
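/*
* For example (illustrative): a pool reporting 2^33 blocks of 128KiB
* (1PiB) is rescaled by two doublings to 2^31 blocks of 512KiB, which
* fits the 32-bit fields while describing the same capacity.
*/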
if (unlikely(zpl_is_32bit_api())) {
while (statp->f_blocks > UINT32_MAX &&
statp->f_bsize < SPA_MAXBLOCKSIZE) {
statp->f_frsize <<= 1;
statp->f_bsize <<= 1;
statp->f_blocks >>= 1;
statp->f_bfree >>= 1;
statp->f_bavail >>= 1;
}
uint64_t usedobjs = statp->f_files - statp->f_ffree;
statp->f_ffree = MIN(statp->f_ffree, UINT32_MAX - usedobjs);
statp->f_files = statp->f_ffree + usedobjs;
}
return (error);
}
static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data };
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_remount(sb, flags, &zm);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
return (error);
}
static int
__zpl_show_devname(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
int error;
if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error);
char *fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dmu_objset_name(zfsvfs->z_os, fsname);
for (int i = 0; fsname[i] != 0; i++) {
/*
* Spaces in the dataset name must be converted to their
* octal escape sequence for getmntent(3) to correctly
* parse the fsname portion of /proc/self/mounts.
*/
if (fsname[i] == ' ') {
seq_puts(seq, "\\040");
} else {
seq_putc(seq, fsname[i]);
}
}
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
zpl_exit(zfsvfs, FTAG);
return (0);
}
static int
zpl_show_devname(struct seq_file *seq, struct dentry *root)
{
return (__zpl_show_devname(seq, root->d_sb->s_fs_info));
}
static int
__zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
seq_printf(seq, ",%s",
zfsvfs->z_flags & ZSB_XATTR ? "xattr" : "noxattr");
#ifdef CONFIG_FS_POSIX_ACL
switch (zfsvfs->z_acl_type) {
case ZFS_ACLTYPE_POSIX:
seq_puts(seq, ",posixacl");
break;
default:
seq_puts(seq, ",noacl");
break;
}
#endif /* CONFIG_FS_POSIX_ACL */
switch (zfsvfs->z_case) {
case ZFS_CASE_SENSITIVE:
seq_puts(seq, ",casesensitive");
break;
case ZFS_CASE_INSENSITIVE:
seq_puts(seq, ",caseinsensitive");
break;
default:
seq_puts(seq, ",casemixed");
break;
}
return (0);
}
static int
zpl_show_options(struct seq_file *seq, struct dentry *root)
{
return (__zpl_show_options(seq, root->d_sb->s_fs_info));
}
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
zfs_mnt_t *zm = (zfs_mnt_t *)data;
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_domount(sb, zm, silent);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_test_super(struct super_block *s, void *data)
{
zfsvfs_t *zfsvfs = s->s_fs_info;
objset_t *os = data;
- int match;
-
/*
* If the os doesn't match the z_os in the super_block, assume it is
* not a match. Matching would imply a multimount of a dataset. It is
* possible that during a multimount, there is a simultaneous operation
* that changes the z_os, e.g., rollback, where the match will be
* missed, but in that case the user will get an EBUSY.
*/
- if (zfsvfs == NULL || os != zfsvfs->z_os)
- return (0);
-
- /*
- * If they do match, recheck with the lock held to prevent mounting the
- * wrong dataset since z_os can be stale when the teardown lock is held.
- */
- if (zpl_enter(zfsvfs, FTAG) != 0)
- return (0);
- match = (os == zfsvfs->z_os);
- zpl_exit(zfsvfs, FTAG);
-
- return (match);
+ return (zfsvfs != NULL && os == zfsvfs->z_os);
}
static struct super_block *
zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm)
{
struct super_block *s;
objset_t *os;
int err;
err = dmu_objset_hold(zm->mnt_osname, FTAG, &os);
if (err)
return (ERR_PTR(-err));
/*
* The dsl pool lock must be released prior to calling sget().
* It is possible sget() may block on the lock in grab_super()
* while deactivate_super() holds that same lock and waits for
* a txg sync. If the dsl_pool lock is held over sget()
* this can prevent the pool sync and cause a deadlock.
*/
dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
dsl_pool_rele(dmu_objset_pool(os), FTAG);
s = sget(fs_type, zpl_test_super, set_anon_super, flags, os);
+ /*
+ * Recheck with the lock held to prevent mounting the wrong dataset
+ * since z_os can be stale when the teardown lock is held.
+ *
+ * We can't do this in zpl_test_super since it runs under a spinlock,
+ * and the s_umount lock is not held there, so it would race with
+ * zfs_umount and zfsvfs could be freed.
+ */
+ if (!IS_ERR(s) && s->s_fs_info != NULL) {
+ zfsvfs_t *zfsvfs = s->s_fs_info;
+ if (zpl_enter(zfsvfs, FTAG) == 0) {
+ if (os != zfsvfs->z_os)
+ err = -SET_ERROR(EBUSY);
+ zpl_exit(zfsvfs, FTAG);
+ } else {
+ err = -SET_ERROR(EBUSY);
+ }
+ }
dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
dsl_dataset_rele(dmu_objset_ds(os), FTAG);
if (IS_ERR(s))
return (ERR_CAST(s));
+ if (err) {
+ deactivate_locked_super(s);
+ return (ERR_PTR(err));
+ }
+
if (s->s_root == NULL) {
err = zpl_fill_super(s, zm, flags & SB_SILENT ? 1 : 0);
if (err) {
deactivate_locked_super(s);
return (ERR_PTR(err));
}
s->s_flags |= SB_ACTIVE;
} else if ((flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
return (ERR_PTR(-EBUSY));
}
return (s);
}
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
const char *osname, void *data)
{
zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };
struct super_block *sb = zpl_mount_impl(fs_type, flags, &zm);
if (IS_ERR(sb))
return (ERR_CAST(sb));
return (dget(sb->s_root));
}
static void
zpl_kill_sb(struct super_block *sb)
{
zfs_preumount(sb);
kill_anon_super(sb);
}
void
zpl_prune_sb(int64_t nr_to_scan, void *arg)
{
struct super_block *sb = (struct super_block *)arg;
int objects = 0;
(void) -zfs_prune(sb, nr_to_scan, &objects);
}
const struct super_operations zpl_super_operations = {
.alloc_inode = zpl_inode_alloc,
.destroy_inode = zpl_inode_destroy,
.dirty_inode = zpl_dirty_inode,
.write_inode = NULL,
.evict_inode = zpl_evict_inode,
.put_super = zpl_put_super,
.sync_fs = zpl_sync_fs,
.statfs = zpl_statfs,
.remount_fs = zpl_remount_fs,
.show_devname = zpl_show_devname,
.show_options = zpl_show_options,
.show_stats = NULL,
};
struct file_system_type zpl_fs_type = {
.owner = THIS_MODULE,
.name = ZFS_DRIVER,
#if defined(HAVE_IDMAP_MNT_API)
.fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
#else
.fs_flags = FS_USERNS_MOUNT,
#endif
.mount = zpl_mount,
.kill_sb = zpl_kill_sb,
};
diff --git a/sys/contrib/openzfs/module/zfs/bpobj.c b/sys/contrib/openzfs/module/zfs/bpobj.c
index 211bab56519c..e772caead29b 100644
--- a/sys/contrib/openzfs/module/zfs/bpobj.c
+++ b/sys/contrib/openzfs/module/zfs/bpobj.c
@@ -1,1005 +1,1030 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>
/*
* Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
*/
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(os);
dsl_pool_t *dp = dmu_objset_pool(os);
if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
ASSERT0(dp->dp_empty_bpobj);
dp->dp_empty_bpobj =
bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(os,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj, tx) == 0);
}
spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
ASSERT(dp->dp_empty_bpobj != 0);
return (dp->dp_empty_bpobj);
} else {
return (bpobj_alloc(os, blocksize, tx));
}
}
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_objset_pool(os);
spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
if (!spa_feature_is_active(dmu_objset_spa(os),
SPA_FEATURE_EMPTY_BPOBJ)) {
VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, tx));
VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
dp->dp_empty_bpobj = 0;
}
}
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
int size;
if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
size = BPOBJ_SIZE_V0;
else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
size = BPOBJ_SIZE_V1;
else if (!spa_feature_is_active(dmu_objset_spa(os),
SPA_FEATURE_LIVELIST))
size = BPOBJ_SIZE_V2;
else
size = sizeof (bpobj_phys_t);
return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
DMU_OT_BPOBJ_HDR, size, tx));
}
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
int64_t i;
bpobj_t bpo;
dmu_object_info_t doi;
int epb;
dmu_buf_t *dbuf = NULL;
ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));
mutex_enter(&bpo.bpo_lock);
if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
goto out;
VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
epb = doi.doi_data_block_size / sizeof (uint64_t);
for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
uint64_t *objarray;
uint64_t offset, blkoff;
offset = i * sizeof (uint64_t);
blkoff = P2PHASE(i, epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
VERIFY3U(0, ==, dmu_buf_hold(os,
bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
objarray = dbuf->db_data;
bpobj_free(os, objarray[blkoff], tx);
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));
out:
mutex_exit(&bpo.bpo_lock);
bpobj_close(&bpo);
VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
dmu_object_info_t doi;
int err;
err = dmu_object_info(os, object, &doi);
if (err)
return (err);
memset(bpo, 0, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
ASSERT(bpo->bpo_dbuf == NULL);
ASSERT(bpo->bpo_phys == NULL);
ASSERT(object != 0);
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
if (err)
return (err);
bpo->bpo_os = os;
bpo->bpo_object = object;
bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
bpo->bpo_havefreed = (doi.doi_bonus_size > BPOBJ_SIZE_V2);
bpo->bpo_phys = bpo->bpo_dbuf->db_data;
return (0);
}
boolean_t
bpobj_is_open(const bpobj_t *bpo)
{
return (bpo->bpo_object != 0);
}
void
bpobj_close(bpobj_t *bpo)
{
/* Lame workaround for closing a bpobj that was never opened. */
if (bpo->bpo_object == 0)
return;
dmu_buf_rele(bpo->bpo_dbuf, bpo);
if (bpo->bpo_cached_dbuf != NULL)
dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
bpo->bpo_dbuf = NULL;
bpo->bpo_phys = NULL;
bpo->bpo_cached_dbuf = NULL;
bpo->bpo_object = 0;
mutex_destroy(&bpo->bpo_lock);
}
static boolean_t
bpobj_is_empty_impl(bpobj_t *bpo)
{
ASSERT(MUTEX_HELD(&bpo->bpo_lock));
return (bpo->bpo_phys->bpo_num_blkptrs == 0 &&
(!bpo->bpo_havesubobj || bpo->bpo_phys->bpo_num_subobjs == 0));
}
boolean_t
bpobj_is_empty(bpobj_t *bpo)
{
mutex_enter(&bpo->bpo_lock);
boolean_t is_empty = bpobj_is_empty_impl(bpo);
mutex_exit(&bpo->bpo_lock);
return (is_empty);
}
/*
* A recursive iteration of the bpobjs would be nice here but we run the risk
* of overflowing function stack space. Instead, find each subobj and add it
* to the head of our list so it can be scanned for subobjs. Like a
* recursive implementation, the "deepest" subobjs will be freed first.
* When a subobj is found to have no additional subobjs, free it.
*/
typedef struct bpobj_info {
bpobj_t *bpi_bpo;
/*
* This object is a subobj of bpi_parent,
* at bpi_index in its subobj array.
*/
struct bpobj_info *bpi_parent;
uint64_t bpi_index;
/* How many of our subobj's are left to process. */
uint64_t bpi_unprocessed_subobjs;
/* True after having visited this bpo's directly referenced BPs. */
boolean_t bpi_visited;
list_node_t bpi_node;
} bpobj_info_t;
static bpobj_info_t *
bpi_alloc(bpobj_t *bpo, bpobj_info_t *parent, uint64_t index)
{
bpobj_info_t *bpi = kmem_zalloc(sizeof (bpobj_info_t), KM_SLEEP);
bpi->bpi_bpo = bpo;
bpi->bpi_parent = parent;
bpi->bpi_index = index;
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
bpi->bpi_unprocessed_subobjs = bpo->bpo_phys->bpo_num_subobjs;
}
return (bpi);
}
/*
* Update bpobj and all of its parents with new space accounting.
*/
static void
propagate_space_reduction(bpobj_info_t *bpi, int64_t freed,
int64_t comp_freed, int64_t uncomp_freed, dmu_tx_t *tx)
{
for (; bpi != NULL; bpi = bpi->bpi_parent) {
bpobj_t *p = bpi->bpi_bpo;
ASSERT(dmu_buf_is_dirty(p->bpo_dbuf, tx));
p->bpo_phys->bpo_bytes -= freed;
ASSERT3S(p->bpo_phys->bpo_bytes, >=, 0);
if (p->bpo_havecomp) {
p->bpo_phys->bpo_comp -= comp_freed;
p->bpo_phys->bpo_uncomp -= uncomp_freed;
}
}
}
static int
bpobj_iterate_blkptrs(bpobj_info_t *bpi, bpobj_itor_t func, void *arg,
int64_t start, dmu_tx_t *tx, boolean_t free)
{
int err = 0;
int64_t freed = 0, comp_freed = 0, uncomp_freed = 0;
dmu_buf_t *dbuf = NULL;
bpobj_t *bpo = bpi->bpi_bpo;
- for (int64_t i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= start; i--) {
+ int64_t i = bpo->bpo_phys->bpo_num_blkptrs - 1;
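+	/*
+	 * Prefetch the blkptrs in a window of up to dmu_prefetch_max
+	 * bytes behind the current position ([pb, pe) in byte offsets,
+	 * bounded below by the start offset ps); further windows are
+	 * issued below as the loop crosses each block boundary.
+	 */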
+ uint64_t pe = P2ALIGN_TYPED(i, bpo->bpo_epb, uint64_t) *
+ sizeof (blkptr_t);
+ uint64_t ps = start * sizeof (blkptr_t);
+ uint64_t pb = MAX((pe > dmu_prefetch_max) ? pe - dmu_prefetch_max : 0,
+ ps);
+ if (pe > pb) {
+ dmu_prefetch(bpo->bpo_os, bpo->bpo_object, 0, pb, pe - pb,
+ ZIO_PRIORITY_ASYNC_READ);
+ }
+ for (; i >= start; i--) {
uint64_t offset = i * sizeof (blkptr_t);
uint64_t blkoff = P2PHASE(i, bpo->bpo_epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
- offset, FTAG, &dbuf, 0);
+ offset, FTAG, &dbuf, DMU_READ_NO_PREFETCH);
if (err)
break;
+ pe = pb;
+ pb = MAX((dbuf->db_offset > dmu_prefetch_max) ?
+ dbuf->db_offset - dmu_prefetch_max : 0, ps);
+ if (pe > pb) {
+ dmu_prefetch(bpo->bpo_os, bpo->bpo_object, 0,
+ pb, pe - pb, ZIO_PRIORITY_ASYNC_READ);
+ }
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
blkptr_t *bparray = dbuf->db_data;
blkptr_t *bp = &bparray[blkoff];
boolean_t bp_freed = BP_GET_FREE(bp);
err = func(arg, bp, bp_freed, tx);
if (err)
break;
if (free) {
int sign = bp_freed ? -1 : +1;
spa_t *spa = dmu_objset_spa(bpo->bpo_os);
freed += sign * bp_get_dsize_sync(spa, bp);
comp_freed += sign * BP_GET_PSIZE(bp);
uncomp_freed += sign * BP_GET_UCSIZE(bp);
ASSERT(dmu_buf_is_dirty(bpo->bpo_dbuf, tx));
bpo->bpo_phys->bpo_num_blkptrs--;
ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
if (bp_freed) {
ASSERT(bpo->bpo_havefreed);
bpo->bpo_phys->bpo_num_freed--;
ASSERT3S(bpo->bpo_phys->bpo_num_freed, >=, 0);
}
}
}
if (free) {
propagate_space_reduction(bpi, freed, comp_freed,
uncomp_freed, tx);
VERIFY0(dmu_free_range(bpo->bpo_os,
bpo->bpo_object,
bpo->bpo_phys->bpo_num_blkptrs * sizeof (blkptr_t),
DMU_OBJECT_END, tx));
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
return (err);
}
/*
* Given an initial bpo, start by freeing the BPs that are directly referenced
* by that bpo. If the bpo has subobjs, read in its last subobj and push the
* subobj to our stack. By popping items off our stack, eventually we will
* encounter a bpo that has no subobjs. We can free its bpobj_info_t, and if
* requested also free the now-empty bpo from disk and decrement
* its parent's subobj count. We continue popping each subobj from our stack,
* visiting its last subobj until they too have no more subobjs, and so on.
*/
static int
bpobj_iterate_impl(bpobj_t *initial_bpo, bpobj_itor_t func, void *arg,
dmu_tx_t *tx, boolean_t free, uint64_t *bpobj_size)
{
list_t stack;
bpobj_info_t *bpi;
int err = 0;
/*
* Create a "stack" for us to work with without worrying about
* stack overflows. Initialize it with the initial_bpo.
*/
list_create(&stack, sizeof (bpobj_info_t),
offsetof(bpobj_info_t, bpi_node));
mutex_enter(&initial_bpo->bpo_lock);
if (bpobj_size != NULL)
*bpobj_size = initial_bpo->bpo_phys->bpo_num_blkptrs;
list_insert_head(&stack, bpi_alloc(initial_bpo, NULL, 0));
while ((bpi = list_head(&stack)) != NULL) {
bpobj_t *bpo = bpi->bpi_bpo;
ASSERT3P(bpo, !=, NULL);
ASSERT(MUTEX_HELD(&bpo->bpo_lock));
ASSERT(bpobj_is_open(bpo));
if (free)
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
if (bpi->bpi_visited == B_FALSE) {
err = bpobj_iterate_blkptrs(bpi, func, arg, 0, tx,
free);
bpi->bpi_visited = B_TRUE;
if (err != 0)
break;
}
/*
* We've finished with this bpo's directly-referenced BP's and
* it has no more unprocessed subobjs. We can free its
* bpobj_info_t (unless it is the topmost, initial_bpo).
* If we are freeing from disk, we can also do that.
*/
if (bpi->bpi_unprocessed_subobjs == 0) {
/*
* If there are no entries, there should
* be no bytes.
*/
if (bpobj_is_empty_impl(bpo)) {
ASSERT0(bpo->bpo_phys->bpo_bytes);
ASSERT0(bpo->bpo_phys->bpo_comp);
ASSERT0(bpo->bpo_phys->bpo_uncomp);
}
/* The initial_bpo has no parent and is not closed. */
if (bpi->bpi_parent != NULL) {
if (free) {
bpobj_t *p = bpi->bpi_parent->bpi_bpo;
ASSERT0(bpo->bpo_phys->bpo_num_blkptrs);
ASSERT3U(p->bpo_phys->bpo_num_subobjs,
>, 0);
ASSERT3U(bpi->bpi_index, ==,
p->bpo_phys->bpo_num_subobjs - 1);
ASSERT(dmu_buf_is_dirty(bpo->bpo_dbuf,
tx));
p->bpo_phys->bpo_num_subobjs--;
VERIFY0(dmu_free_range(p->bpo_os,
p->bpo_phys->bpo_subobjs,
bpi->bpi_index * sizeof (uint64_t),
sizeof (uint64_t), tx));
/* eliminate the empty subobj list */
if (bpo->bpo_havesubobj &&
bpo->bpo_phys->bpo_subobjs != 0) {
ASSERT0(bpo->bpo_phys->
bpo_num_subobjs);
err = dmu_object_free(
bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
tx);
if (err)
break;
bpo->bpo_phys->bpo_subobjs = 0;
}
err = dmu_object_free(p->bpo_os,
bpo->bpo_object, tx);
if (err)
break;
}
mutex_exit(&bpo->bpo_lock);
bpobj_close(bpo);
kmem_free(bpo, sizeof (bpobj_t));
} else {
mutex_exit(&bpo->bpo_lock);
}
/*
* Finished processing this bpo. Unlock, and free
* our "stack" info.
*/
list_remove_head(&stack);
kmem_free(bpi, sizeof (bpobj_info_t));
} else {
/*
* We have unprocessed subobjs. Process the next one.
*/
ASSERT(bpo->bpo_havecomp);
ASSERT3P(bpobj_size, ==, NULL);
/* Add the last subobj to stack. */
int64_t i = bpi->bpi_unprocessed_subobjs - 1;
uint64_t offset = i * sizeof (uint64_t);
- uint64_t obj_from_sublist;
+ uint64_t subobj;
err = dmu_read(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
- offset, sizeof (uint64_t), &obj_from_sublist,
- DMU_READ_PREFETCH);
+ offset, sizeof (uint64_t), &subobj,
+ DMU_READ_NO_PREFETCH);
if (err)
break;
- bpobj_t *sublist = kmem_alloc(sizeof (bpobj_t),
- KM_SLEEP);
- err = bpobj_open(sublist, bpo->bpo_os,
- obj_from_sublist);
- if (err)
+ bpobj_t *subbpo = kmem_alloc(sizeof (bpobj_t),
+ KM_SLEEP);
+ err = bpobj_open(subbpo, bpo->bpo_os, subobj);
+ if (err) {
+ kmem_free(subbpo, sizeof (bpobj_t));
break;
+ }
+
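+	/*
+	 * Begin prefetching the new subobj's own subobj list object;
+	 * it will be read as soon as this subobj is processed off
+	 * the stack.
+	 */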
+ if (subbpo->bpo_havesubobj &&
+ subbpo->bpo_phys->bpo_subobjs != 0) {
+ dmu_prefetch(subbpo->bpo_os,
+ subbpo->bpo_phys->bpo_subobjs, 0, 0, 0,
+ ZIO_PRIORITY_ASYNC_READ);
+ }
- list_insert_head(&stack, bpi_alloc(sublist, bpi, i));
- mutex_enter(&sublist->bpo_lock);
+ list_insert_head(&stack, bpi_alloc(subbpo, bpi, i));
+ mutex_enter(&subbpo->bpo_lock);
bpi->bpi_unprocessed_subobjs--;
}
}
/*
* Clean up anything left on the "stack" after we leave the loop.
* Every bpo on the stack is locked so we must remember to undo
* that now (in LIFO order).
*/
while ((bpi = list_remove_head(&stack)) != NULL) {
bpobj_t *bpo = bpi->bpi_bpo;
ASSERT(err != 0);
ASSERT3P(bpo, !=, NULL);
mutex_exit(&bpo->bpo_lock);
/* do not free the initial_bpo */
if (bpi->bpi_parent != NULL) {
bpobj_close(bpi->bpi_bpo);
kmem_free(bpi->bpi_bpo, sizeof (bpobj_t));
}
kmem_free(bpi, sizeof (bpobj_info_t));
}
list_destroy(&stack);
return (err);
}
/*
* Iterate and remove the entries. If func returns nonzero, iteration
* will stop and that entry will not be removed.
*/
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE, NULL));
}
/*
* Iterate the entries. If func returns nonzero, iteration will stop.
*
* If there are no subobjs:
*
* *bpobj_size can be used to return the number of block pointers in the
* bpobj. Note that this may be different from the number of block pointers
* that are iterated over, if iteration is terminated early (e.g. by the func
* returning nonzero).
*
* If there are concurrent (or subsequent) modifications to the bpobj then the
* returned *bpobj_size can be passed as "start" to
* livelist_bpobj_iterate_from_nofree() to iterate the newly added entries.
*/
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg,
uint64_t *bpobj_size)
{
return (bpobj_iterate_impl(bpo, func, arg, NULL, B_FALSE, bpobj_size));
}
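/*
* Illustrative usage (not part of this change), per the note above:
*
*	uint64_t size;
*	(void) bpobj_iterate_nofree(bpo, func, arg, &size);
*	... entries are appended concurrently ...
*	(void) livelist_bpobj_iterate_from_nofree(bpo, func, arg, size);
*/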
/*
* Iterate over the blkptrs in the bpobj beginning at index start. If func
* returns nonzero, iteration will stop. This is a livelist specific function
* since it assumes that there are no subobjs present.
*/
int
livelist_bpobj_iterate_from_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg,
int64_t start)
{
if (bpo->bpo_havesubobj)
VERIFY0(bpo->bpo_phys->bpo_subobjs);
bpobj_info_t *bpi = bpi_alloc(bpo, NULL, 0);
int err = bpobj_iterate_blkptrs(bpi, func, arg, start, NULL, B_FALSE);
kmem_free(bpi, sizeof (bpobj_info_t));
return (err);
}
/*
* Logically add subobj's contents to the parent bpobj.
*
* In the most general case, this is accomplished in constant time by adding
* a reference to subobj. This case is used when enqueuing a large subobj:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+
* | bp | bp | bp | bp | bp | | obj | obj | obj |
* +----+----+----+----+----+ +-----+-----+-----+
*
* +--------------+ +--------------+
* | sub-bpobj |----------------------> | subsubobj |
* +----+----+----+----+---------+----+ +-----+-----+--+--------+-----+
* | bp | bp | bp | bp | ... | bp | | obj | obj | ... | obj |
* +----+----+----+----+---------+----+ +-----+-----+-----------+-----+
*
* Result: sub-bpobj added to parent's subobj list.
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+-----+
* | bp | bp | bp | bp | bp | | obj | obj | obj | OBJ |
* +----+----+----+----+----+ +-----+-----+-----+--|--+
* |
* /-----------------------------------------------------/
* v
* +--------------+ +--------------+
* | sub-bpobj |----------------------> | subsubobj |
* +----+----+----+----+---------+----+ +-----+-----+--+--------+-----+
* | bp | bp | bp | bp | ... | bp | | obj | obj | ... | obj |
* +----+----+----+----+---------+----+ +-----+-----+-----------+-----+
*
*
* In a common case, the subobj is small: its bp's and its list of subobj's
* are each stored in a single block. In this case we copy the subobj's
* contents to the parent:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+
* | bp | bp | bp | bp | bp | | obj | obj | obj |
* +----+----+----+----+----+ +-----+-----+-----+
* ^ ^
* +--------------+ | +--------------+ |
* | sub-bpobj |---------^------------> | subsubobj | ^
* +----+----+----+ | +-----+-----+--+ |
* | BP | BP |-->-->-->-->-/ | OBJ | OBJ |-->-/
* +----+----+ +-----+-----+
*
* Result: subobj destroyed, contents copied to parent:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+----+----+ +-----+-----+--+--+-----+-----+
* | bp | bp | bp | bp | bp | BP | BP | | obj | obj | obj | OBJ | OBJ |
* +----+----+----+----+----+----+----+ +-----+-----+-----+-----+-----+
*
*
* If the subobj has many BP's but few subobj's, we can copy the sub-subobj's
* but retain the sub-bpobj:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+
* | bp | bp | bp | bp | bp | | obj | obj | obj |
* +----+----+----+----+----+ +-----+-----+-----+
* ^
* +--------------+ +--------------+ |
* | sub-bpobj |----------------------> | subsubobj | ^
* +----+----+----+----+---------+----+ +-----+-----+--+ |
* | bp | bp | bp | bp | ... | bp | | OBJ | OBJ |-->-/
* +----+----+----+----+---------+----+ +-----+-----+
*
* Result: sub-sub-bpobjs and subobj added to parent's subobj list.
* +--------------+ +--------------+
* | bpobj |-------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+-----+-----+------+
* | bp | bp | bp | bp | bp | | obj | obj | obj | OBJ | OBJ | OBJ* |
* +----+----+----+----+----+ +-----+-----+-----+-----+-----+--|---+
* |
* /--------------------------------------------------------------/
* v
* +--------------+
* | sub-bpobj |
* +----+----+----+----+---------+----+
* | bp | bp | bp | bp | ... | bp |
* +----+----+----+----+---------+----+
*/
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
bpobj_t subbpo;
uint64_t used, comp, uncomp, subsubobjs;
boolean_t copy_subsub = B_TRUE;
boolean_t copy_bps = B_TRUE;
ASSERT(bpobj_is_open(bpo));
ASSERT(subobj != 0);
ASSERT(bpo->bpo_havesubobj);
ASSERT(bpo->bpo_havecomp);
ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);
if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
bpobj_decr_empty(bpo->bpo_os, tx);
return;
}
VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
if (bpobj_is_empty(&subbpo)) {
/* No point in having an empty subobj. */
bpobj_close(&subbpo);
bpobj_free(bpo->bpo_os, subobj, tx);
return;
}
VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));
mutex_enter(&bpo->bpo_lock);
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
dmu_object_info_t doi;
if (bpo->bpo_phys->bpo_subobjs != 0) {
ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
&doi));
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
}
/*
* If subobj has only one block of subobjs, then move subobj's
* subobjs to bpo's subobj list directly. This reduces recursion in
* bpobj_iterate due to nested subobjs.
*/
subsubobjs = subbpo.bpo_phys->bpo_subobjs;
if (subsubobjs != 0) {
VERIFY0(dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
if (doi.doi_max_offset > doi.doi_data_block_size) {
copy_subsub = B_FALSE;
}
}
/*
* If, in addition to having only one block of subobj's, subobj has
* only one block of bp's, then move subobj's bp's to bpo's bp list
* directly. This reduces recursion in bpobj_iterate due to nested
* subobjs.
*/
VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subobj, &doi));
if (doi.doi_max_offset > doi.doi_data_block_size || !copy_subsub) {
copy_bps = B_FALSE;
}
if (copy_subsub && subsubobjs != 0) {
dmu_buf_t *subdb;
uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;
VERIFY0(dmu_buf_hold(bpo->bpo_os, subsubobjs,
0, FTAG, &subdb, 0));
/*
* Make sure that we are not asking dmu_write()
* to write more data than we have in our buffer.
*/
VERIFY3U(subdb->db_size, >=,
numsubsub * sizeof (subobj));
if (bpo->bpo_phys->bpo_subobjs == 0) {
bpo->bpo_phys->bpo_subobjs =
dmu_object_alloc(bpo->bpo_os,
DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
DMU_OT_NONE, 0, tx);
}
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
numsubsub * sizeof (subobj), subdb->db_data, tx);
dmu_buf_rele(subdb, FTAG);
bpo->bpo_phys->bpo_num_subobjs += numsubsub;
dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
subbpo.bpo_phys->bpo_subobjs = 0;
VERIFY0(dmu_object_free(bpo->bpo_os, subsubobjs, tx));
}
if (copy_bps) {
dmu_buf_t *bps;
uint64_t numbps = subbpo.bpo_phys->bpo_num_blkptrs;
ASSERT(copy_subsub);
VERIFY0(dmu_buf_hold(bpo->bpo_os, subobj,
0, FTAG, &bps, 0));
/*
* Make sure that we are not asking dmu_write()
* to write more data than we have in our buffer.
*/
VERIFY3U(bps->db_size, >=, numbps * sizeof (blkptr_t));
dmu_write(bpo->bpo_os, bpo->bpo_object,
bpo->bpo_phys->bpo_num_blkptrs * sizeof (blkptr_t),
numbps * sizeof (blkptr_t),
bps->db_data, tx);
dmu_buf_rele(bps, FTAG);
bpo->bpo_phys->bpo_num_blkptrs += numbps;
bpobj_close(&subbpo);
VERIFY0(dmu_object_free(bpo->bpo_os, subobj, tx));
} else {
bpobj_close(&subbpo);
if (bpo->bpo_phys->bpo_subobjs == 0) {
bpo->bpo_phys->bpo_subobjs =
dmu_object_alloc(bpo->bpo_os,
DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
DMU_OT_NONE, 0, tx);
}
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
sizeof (subobj), &subobj, tx);
bpo->bpo_phys->bpo_num_subobjs++;
}
bpo->bpo_phys->bpo_bytes += used;
bpo->bpo_phys->bpo_comp += comp;
bpo->bpo_phys->bpo_uncomp += uncomp;
mutex_exit(&bpo->bpo_lock);
}
/*
* Prefetch metadata required for bpobj_enqueue_subobj().
*/
void
bpobj_prefetch_subobj(bpobj_t *bpo, uint64_t subobj)
{
dmu_object_info_t doi;
bpobj_t subbpo;
uint64_t subsubobjs;
boolean_t copy_subsub = B_TRUE;
boolean_t copy_bps = B_TRUE;
ASSERT(bpobj_is_open(bpo));
ASSERT(subobj != 0);
if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj)
return;
if (bpobj_open(&subbpo, bpo->bpo_os, subobj) != 0)
return;
if (bpobj_is_empty(&subbpo)) {
bpobj_close(&subbpo);
return;
}
subsubobjs = subbpo.bpo_phys->bpo_subobjs;
bpobj_close(&subbpo);
if (subsubobjs != 0) {
if (dmu_object_info(bpo->bpo_os, subsubobjs, &doi) != 0)
return;
if (doi.doi_max_offset > doi.doi_data_block_size)
copy_subsub = B_FALSE;
}
if (dmu_object_info(bpo->bpo_os, subobj, &doi) != 0)
return;
if (doi.doi_max_offset > doi.doi_data_block_size || !copy_subsub)
copy_bps = B_FALSE;
if (copy_subsub && subsubobjs != 0) {
if (bpo->bpo_phys->bpo_subobjs) {
dmu_prefetch(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, 0,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj), 1,
ZIO_PRIORITY_ASYNC_READ);
}
dmu_prefetch(bpo->bpo_os, subsubobjs, 0, 0, 1,
ZIO_PRIORITY_ASYNC_READ);
}
if (copy_bps) {
dmu_prefetch(bpo->bpo_os, bpo->bpo_object, 0,
bpo->bpo_phys->bpo_num_blkptrs * sizeof (blkptr_t), 1,
ZIO_PRIORITY_ASYNC_READ);
dmu_prefetch(bpo->bpo_os, subobj, 0, 0, 1,
ZIO_PRIORITY_ASYNC_READ);
} else if (bpo->bpo_phys->bpo_subobjs) {
dmu_prefetch(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, 0,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj), 1,
ZIO_PRIORITY_ASYNC_READ);
}
}
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
blkptr_t stored_bp = *bp;
uint64_t offset;
int blkoff;
blkptr_t *bparray;
ASSERT(bpobj_is_open(bpo));
ASSERT(!BP_IS_HOLE(bp));
ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);
if (BP_IS_EMBEDDED(bp)) {
/*
* The bpobj will compress better without the payload.
*
* Note that we store EMBEDDED bp's because they have an
* uncompressed size, which must be accounted for. An
* alternative would be to add their size to bpo_uncomp
* without storing the bp, but that would create additional
* complications: bpo_uncomp would be inconsistent with the
* set of BP's stored, and bpobj_iterate() wouldn't visit
* all the space accounted for in the bpobj.
*/
memset(&stored_bp, 0, sizeof (stored_bp));
stored_bp.blk_prop = bp->blk_prop;
stored_bp.blk_birth = bp->blk_birth;
} else if (!BP_GET_DEDUP(bp)) {
/* The bpobj will compress better without the checksum */
memset(&stored_bp.blk_cksum, 0, sizeof (stored_bp.blk_cksum));
}
stored_bp.blk_fill = 0;
BP_SET_FREE(&stored_bp, bp_freed);
mutex_enter(&bpo->bpo_lock);
offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);
if (bpo->bpo_cached_dbuf == NULL ||
offset < bpo->bpo_cached_dbuf->db_offset ||
offset >= bpo->bpo_cached_dbuf->db_offset +
bpo->bpo_cached_dbuf->db_size) {
if (bpo->bpo_cached_dbuf)
dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
offset, bpo, &bpo->bpo_cached_dbuf, 0));
ASSERT3P(bpo->bpo_cached_dbuf, !=, NULL);
}
dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
bparray = bpo->bpo_cached_dbuf->db_data;
bparray[blkoff] = stored_bp;
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
bpo->bpo_phys->bpo_num_blkptrs++;
int sign = bp_freed ? -1 : +1;
bpo->bpo_phys->bpo_bytes += sign *
bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
if (bpo->bpo_havecomp) {
bpo->bpo_phys->bpo_comp += sign * BP_GET_PSIZE(bp);
bpo->bpo_phys->bpo_uncomp += sign * BP_GET_UCSIZE(bp);
}
if (bp_freed) {
ASSERT(bpo->bpo_havefreed);
bpo->bpo_phys->bpo_num_freed++;
}
mutex_exit(&bpo->bpo_lock);
}
struct space_range_arg {
spa_t *spa;
uint64_t mintxg;
uint64_t maxtxg;
uint64_t used;
uint64_t comp;
uint64_t uncomp;
};
static int
space_range_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
(void) bp_freed, (void) tx;
struct space_range_arg *sra = arg;
if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
sra->used += bp_get_dsize_sync(sra->spa, bp);
else
sra->used += bp_get_dsize(sra->spa, bp);
sra->comp += BP_GET_PSIZE(bp);
sra->uncomp += BP_GET_UCSIZE(bp);
}
return (0);
}
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
ASSERT(bpobj_is_open(bpo));
mutex_enter(&bpo->bpo_lock);
*usedp = bpo->bpo_phys->bpo_bytes;
if (bpo->bpo_havecomp) {
*compp = bpo->bpo_phys->bpo_comp;
*uncompp = bpo->bpo_phys->bpo_uncomp;
mutex_exit(&bpo->bpo_lock);
return (0);
} else {
mutex_exit(&bpo->bpo_lock);
return (bpobj_space_range(bpo, 0, UINT64_MAX,
usedp, compp, uncompp));
}
}
/*
* Return the amount of space in the bpobj which is:
* mintxg < blk_birth <= maxtxg
*/
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
struct space_range_arg sra = { 0 };
int err;
ASSERT(bpobj_is_open(bpo));
/*
* As an optimization, if they want the whole txg range, just
* get bpo_bytes rather than iterating over the bps.
*/
if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
return (bpobj_space(bpo, usedp, compp, uncompp));
sra.spa = dmu_objset_spa(bpo->bpo_os);
sra.mintxg = mintxg;
sra.maxtxg = maxtxg;
err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
*usedp = sra.used;
*compp = sra.comp;
*uncompp = sra.uncomp;
return (err);
}
/*
* A bpobj_itor_t to append blkptrs to a bplist. Note that while blkptrs in a
* bpobj are designated as free or allocated that information is not preserved
* in bplists.
*/
int
bplist_append_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
(void) bp_freed, (void) tx;
bplist_t *bpl = arg;
bplist_append(bpl, bp);
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/brt.c b/sys/contrib/openzfs/module/zfs/brt.c
index 99bd472d6fb4..ddd8eefe600b 100644
--- a/sys/contrib/openzfs/module/zfs/brt.c
+++ b/sys/contrib/openzfs/module/zfs/brt.c
@@ -1,1884 +1,1915 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2020, 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/bitmap.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>
/*
* Block Cloning design.
*
* Block Cloning allows one to manually clone a file (or a subset of its blocks)
* into another (or the same) file by just creating additional references to
* the data blocks without copying the data itself. Those references are kept
* in the Block Reference Tables (BRTs).
*
* In many ways this is similar to the existing deduplication, but there are
* some important differences:
*
* - Deduplication is automatic and Block Cloning is not - one has to use
* dedicated system call(s) to clone the given file/blocks.
* - Deduplication keeps all data blocks in its table, even those referenced
* just once. Block Cloning creates an entry in its tables only when there
* are at least two references to the given data block. If the block was
* never explicitly cloned or the second to last reference was dropped,
* there will be neither space nor performance overhead.
* - Deduplication needs data to work - one needs to pass real data to the
* write(2) syscall, so a hash can be calculated. Block Cloning doesn't require
* data, just block pointers to the data, so it is extremely fast, as we pay
* neither the cost of reading the data, nor the cost of writing the data -
* we operate exclusively on metadata.
* - If the D (dedup) bit is not set in the block pointer, it means that
* the block is not in the dedup table (DDT) and we won't consult the DDT
* when we need to free the block. The BRT, however, must be consulted on every
* free, because we cannot modify the source BP (e.g. by setting something
* similar to the D bit), thus we have no hint if the block is in the
* Block Reference Table (BRT), so we need to look into the BRT. There is
* an optimization in place that allows us to eliminate the majority of BRT
* lookups which is described below in the "Minimizing free penalty" section.
* - The BRT entry is much smaller than the DDT entry - for BRT we only store
* a 64bit offset and a 64bit reference counter.
* - Dedup keys are cryptographic hashes, so two blocks that are close to each
* other on disk are most likely in totally different parts of the DDT.
* The BRT entry keys are offsets into a single top-level VDEV, so data blocks
* from one file should have BRT entries close to each other.
* - Scrub will only do a single pass over a block that is referenced multiple
* times in the DDT. Unfortunately this is not currently (if at all) possible
* with Block Cloning, so a block referenced multiple times will be scrubbed
* multiple times. The new, sorted scrub should be able to eliminate
* duplicated reads given enough memory.
* - Deduplication requires cryptographically strong hash as a checksum or
* additional data verification. Block Cloning works with any checksum
* algorithm or even with checksumming disabled.
*
* As mentioned above, the BRT entries are much smaller than the DDT entries.
* To uniquely identify a block we just need its vdev id and offset. We also
* need to maintain a reference counter. The vdev id will often repeat, as there
* is a small number of top-level VDEVs and a large number of blocks stored in
* each VDEV. We take advantage of that to reduce the BRT entry size further by
* maintaining one BRT for each top-level VDEV, so we can then have only offset
* and counter as the BRT entry.
*
* Minimizing free penalty.
*
* Block Cloning allows creating additional references to any existing block.
* When we free a block there is no hint in the block pointer whether the block
* was cloned or not, so on each free we have to check if there is a
* corresponding entry in the BRT or not. If there is, we need to decrease
* the reference counter. Doing a BRT lookup on every free can potentially be
* expensive, as it may require additional I/O if the BRT doesn't fit into memory.
* This is the main problem with deduplication, so we've learned our lesson and
* try not to repeat the same mistake here. How do we do that? We divide each
* top-level VDEV into 16MB regions. For each region we maintain a counter that
* is a sum of all the BRT entries that have offsets within the region. This
* creates the entries count array of 16bit numbers for each top-level VDEV.
* The entries count array is always kept in memory and updated on disk in the
* same transaction group as the BRT updates to keep everything in-sync. We can
* keep the array in memory, because it is very small. With 16MB regions and
* a 1TB VDEV the array requires only 128kB of memory (we may decide to decrease
* the region size even further in the future). Now, when we want to free
* a block, we first consult the array. If the counter for the whole region is
* zero, there is no need to look for the BRT entry, as there isn't one for
* sure. If the counter for the region is greater than zero, only then we will
* do a BRT lookup and if an entry is found we will decrease the reference
* counter in the BRT entry and in the entry counters array.
*
* The entry counters array is small, but can potentially be larger for very
* large VDEVs or smaller regions. In this case we don't want to rewrite the
* entire array on every change. We then divide the array into 32kB blocks and keep
* a bitmap of dirty blocks within a transaction group. When we sync the
* transaction group we can only update the parts of the entry counters array
* that were modified. Note: Keeping track of the dirty parts of the entry
* counters array is implemented, but updating only parts of the array on disk
* is not yet implemented - for now we will update entire array if there was
* any change.
*
* The implementation tries to be economical: if the BRT is not used, or no longer
* used, there will be no entries in the MOS and no additional memory used (e.g.
* the entry counters array is only allocated if needed).
*
* Interaction between Deduplication and Block Cloning.
*
* If both functionalities are in use, we could end up with a block that is
* referenced multiple times in both DDT and BRT. When we free one of the
* references we couldn't tell where it belongs, so we would have to decide
* which table takes precedence: do we first clear DDT references or BRT
* references? To avoid this dilemma BRT cooperates with DDT - if a given block
* is being cloned using BRT and the BP has the D (dedup) bit set, BRT will
* look up the DDT entry instead and increase the counter there. No BRT entry
* will be created for a block which has the D (dedup) bit set.
* BRT may be more efficient for manual deduplication, but if the block is
* already in the DDT, then creating an additional BRT entry would be less
* efficient. This clever idea was proposed by Allan Jude.
*
* Block Cloning across datasets.
*
* Block Cloning is not limited to cloning blocks within the same dataset.
* It is possible (and very useful) to clone blocks between different datasets.
* One use case is recovering files from snapshots. By cloning the files into
* a dataset we need no additional storage. Without Block Cloning we would need
* additional space for those files.
* Another interesting use case is moving the files between datasets
* (copying the file content to the new dataset and removing the source file).
* In that case Block Cloning will only be used briefly, because the BRT entries
* will be removed when the source is removed.
* Note: currently it is not possible to clone blocks between encrypted
* datasets, even if those datasets use the same encryption key (this includes
* snapshots of encrypted datasets). Cloning blocks between datasets that use
* the same keys should be possible and should be implemented in the future.
*
* Block Cloning flow through ZFS layers.
*
* Note: Block Cloning can be used both for cloning file system blocks and ZVOL
* blocks. As of this writing no interface is implemented that allows for block
* cloning within a ZVOL.
* FreeBSD and Linux provide the copy_file_range(2) system call and we will use
* it for block cloning.
*
* ssize_t
* copy_file_range(int infd, off_t *inoffp, int outfd, off_t *outoffp,
* size_t len, unsigned int flags);
*
* Even though offsets and length represent bytes, they have to be
- * block-aligned or we will return the EXDEV error so the upper layer can
+ * block-aligned or we will return an error so the upper layer can
* fallback to the generic mechanism that will just copy the data.
* Using copy_file_range(2) will call OS-independent zfs_clone_range() function.
* This function was implemented based on zfs_write(), but instead of writing
* the given data we first read block pointers using the new dmu_read_l0_bps()
* function from the source file. Once we have BPs from the source file we call
* the dmu_brt_clone() function on the destination file. This function
* allocates BPs for us. We iterate over all source BPs. If the given BP is
* a hole or an embedded block, we just copy the BP as-is. If it points to
* real data, we place this BP on the BRT pending list using the brt_pending_add()
* function.
*
* We use this pending list to keep track of all BPs that got new references
* within this transaction group.
*
* Some special cases to consider and how we address them:
* - The block we want to clone may have been created within the same
* transaction group that we are trying to clone. Such a block has no BP
- * allocated yet, so cannot be immediately cloned. We return EXDEV.
+ * allocated yet, so cannot be immediately cloned. We return EAGAIN.
* - The block we want to clone may have been modified within the same
- * transaction group. We return EXDEV.
+ * transaction group. We return EAGAIN.
* - A block may be cloned multiple times during one transaction group (that's
* why the pending list is actually a tree and not an append-only list - this
* way we can figure out more quickly whether this block is cloned for the first
* time in this txg or a consecutive time).
* - A block may be cloned and freed within the same transaction group
* (see dbuf_undirty()).
* - A block may be cloned and within the same transaction group the clone
* can be cloned again (see dmu_read_l0_bps()).
* - A file might have been deleted, but the caller still has a file descriptor
* open to this file and clones it.
*
* When we free a block we have an additional step in the ZIO pipeline where we
* call the zio_brt_free() function. We then call the brt_entry_decref()
* that loads the corresponding BRT entry (if one exists) and decreases
* the reference counter. If this is not the last reference, we will stop the ZIO
* pipeline here. If this is the last reference or the block is not in the
* BRT, we continue the pipeline and free the block as usual.
*
* At the beginning of spa_sync() where there can be no more block cloning,
* but before issuing frees we call brt_pending_apply(). This function applies
* all the new clones to the BRT table - we load BRT entries and update
* reference counters. To sync new BRT entries to disk, we use brt_sync()
* function. This function will sync all dirty per-top-level-vdev BRTs,
* the entry counters arrays, etc.
*
* Block Cloning and ZIL.
*
* Every clone operation is divided into chunks (similar to write) and each
* chunk is cloned in a separate transaction. The chunk size is determined by
* how many BPs we can fit into a single ZIL entry.
* Replaying clone operation is different from the regular clone operation,
* as when we log clone operations we cannot use the source object - it may
* reside on a different dataset, so we log BPs we want to clone.
* The ZIL is replayed when we mount the given dataset, not when the pool is
* imported. Taking this into account it is possible that the pool is imported
* without mounting datasets and the source dataset is destroyed before the
* destination dataset is mounted and its ZIL replayed.
* To address this situation we leverage zil_claim() mechanism where ZFS will
* parse all the ZILs on pool import. When we come across TX_CLONE_RANGE
* entries, we will bump reference counters for their BPs in the BRT and then
* on mount and ZIL replay we will just attach BPs to the file without
* bumping reference counters.
* Note it is still possible that after zil_claim() we never mount the
* destination, so we never replay its ZIL and we destroy it. This way we would
* end up with leaked references in the BRT. We address that too, as ZFS gives us
* a chance to clean this up on dataset destroy (see zil_free_clone_range()).
*/
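/*
 * To make the "minimizing free penalty" scheme concrete: before any BRT
 * I/O we map the freed block's offset to its 16MB region and consult the
 * in-memory 16bit counter for that region; only a non-zero counter forces
 * a real BRT lookup (see brt_vdev_lookup() below). An illustrative,
 * self-contained sketch of that fast path (hypothetical names, not part
 * of brt.c):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define RANGESIZE	(16 * 1024 * 1024)
 *	#define NREGIONS	8
 *
 *	static uint16_t entcount[NREGIONS];	// per-region entry counts
 *
 *	static int
 *	may_need_brt_lookup(uint64_t offset)
 *	{
 *		uint64_t idx = offset / RANGESIZE;
 *		return (idx < NREGIONS && entcount[idx] > 0);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		entcount[2] = 1;	// one cloned block in region 2
 *		printf("%d\n", may_need_brt_lookup(2ULL * RANGESIZE + 512));
 *		printf("%d\n", may_need_brt_lookup(5ULL * RANGESIZE));
 *		// prints "1" then "0": only region 2 needs a BRT lookup.
 *		return (0);
 *	}
 */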
/*
* BRT - Block Reference Table.
*/
#define BRT_OBJECT_VDEV_PREFIX "com.fudosecurity:brt:vdev:"
/*
* We divide each VDEV into 16MB chunks. Each chunk is represented in memory
* by a 16bit counter, thus 1TB VDEV requires 128kB of memory: (1TB / 16MB) * 2B
* Each element in this array represents how many BRT entries we have in this
* chunk of storage. We always load this entire array into memory and update as
* needed. By having it in memory we can quickly tell (during zio_free()) if
* there are any BRT entries that we might need to update.
*
* This value cannot be larger than 16MB, at least as long as we support
* 512 byte block sizes. With 512 byte block size we can have exactly
* 32768 blocks in 16MB. In 32MB we could have 65536 blocks, which is one too
* many for a 16bit counter.
*/
#define BRT_RANGESIZE (16 * 1024 * 1024)
_Static_assert(BRT_RANGESIZE / SPA_MINBLOCKSIZE <= UINT16_MAX,
"BRT_RANGESIZE is too large.");
/*
* We don't want to update the whole structure every time. Maintain a bitmap
* of dirty blocks within the regions, so that a single bit represents a
* block size of entcounts. For example if we have a 1PB vdev then all
* entcounts take 128MB of memory ((1PB / 16MB) * 2B). We can divide this
* 128MB array of entcounts into 32kB disk blocks, as we don't want to update
* the whole 128MB on disk when we have updated only a single entcount.
* We maintain a bitmap where each 32kB disk block within 128MB entcounts array
* is represented by a single bit. This gives us 4096 bits. A set bit in the
* bitmap means that we had a change in at least one of the 16384 entcounts
* that reside on a 32kB disk block (32kB / sizeof (uint16_t)).
*/
#define BRT_BLOCKSIZE (32 * 1024)
#define BRT_RANGESIZE_TO_NBLOCKS(size) \
(((size) - 1) / BRT_BLOCKSIZE / sizeof (uint16_t) + 1)
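/*
 * BRT_RANGESIZE_TO_NBLOCKS() relies on the usual integer ceiling idiom:
 * for x > 0, (x - 1) / d + 1 == ceil(x / d). A quick standalone check of
 * the idiom (hypothetical code, not part of brt.c):
 *
 *	#include <stdio.h>
 *
 *	#define CEIL_DIV(x, d)	(((x) - 1) / (d) + 1)
 *
 *	int
 *	main(void)
 *	{
 *		printf("%d %d %d\n",
 *		    CEIL_DIV(1, 4), CEIL_DIV(4, 4), CEIL_DIV(5, 4));
 *		// prints "1 1 2"
 *		return (0);
 *	}
 */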
#define BRT_LITTLE_ENDIAN 0
#define BRT_BIG_ENDIAN 1
#ifdef _ZFS_LITTLE_ENDIAN
#define BRT_NATIVE_BYTEORDER BRT_LITTLE_ENDIAN
#define BRT_NON_NATIVE_BYTEORDER BRT_BIG_ENDIAN
#else
#define BRT_NATIVE_BYTEORDER BRT_BIG_ENDIAN
#define BRT_NON_NATIVE_BYTEORDER BRT_LITTLE_ENDIAN
#endif
typedef struct brt_vdev_phys {
uint64_t bvp_mos_entries;
uint64_t bvp_size;
uint64_t bvp_byteorder;
uint64_t bvp_totalcount;
uint64_t bvp_rangesize;
uint64_t bvp_usedspace;
uint64_t bvp_savedspace;
} brt_vdev_phys_t;
typedef struct brt_vdev {
/*
* VDEV id.
*/
uint64_t bv_vdevid;
/*
* Is the structure initiated?
* (bv_entcount and bv_bitmap are allocated?)
*/
boolean_t bv_initiated;
/*
* Object number in the MOS for the entcount array and brt_vdev_phys.
*/
uint64_t bv_mos_brtvdev;
/*
* Object number in the MOS for the entries table.
*/
uint64_t bv_mos_entries;
/*
* Entries to sync.
*/
avl_tree_t bv_tree;
/*
* Does the bv_entcount[] array need byte swapping?
*/
boolean_t bv_need_byteswap;
/*
* Number of entries in the bv_entcount[] array.
*/
uint64_t bv_size;
/*
* This is the array with BRT entry count per BRT_RANGESIZE.
*/
uint16_t *bv_entcount;
/*
* Sum of all bv_entcount[]s.
*/
uint64_t bv_totalcount;
/*
* Space on disk occupied by cloned blocks (without compression).
*/
uint64_t bv_usedspace;
/*
* How much additional space would be occupied without block cloning.
*/
uint64_t bv_savedspace;
/*
* brt_vdev_phys needs updating on disk.
*/
boolean_t bv_meta_dirty;
/*
* bv_entcount[] needs updating on disk.
*/
boolean_t bv_entcount_dirty;
/*
* bv_entcount[] can potentially be a bit too big to synchronize in full
* when we have changed only a few entcounts. The fields below allow us to
* track updates to the bv_entcount[] array since the last sync.
* A single bit in the bv_bitmap represents as many entcounts as can
* fit into a single BRT_BLOCKSIZE.
* For example, if we have 65536 entcounts in the bv_entcount array
* (so the whole array is 128kB) and we updated bv_entcount[2] and
* bv_entcount[5], then only the first bit in the bv_bitmap will
* be set and we will write only the first BRT_BLOCKSIZE out of 128kB.
*/
ulong_t *bv_bitmap;
uint64_t bv_nblocks;
} brt_vdev_t;
/*
* In-core brt
*/
typedef struct brt {
krwlock_t brt_lock;
spa_t *brt_spa;
#define brt_mos brt_spa->spa_meta_objset
uint64_t brt_rangesize;
uint64_t brt_usedspace;
uint64_t brt_savedspace;
avl_tree_t brt_pending_tree[TXG_SIZE];
kmutex_t brt_pending_lock[TXG_SIZE];
/* Sum of all entries across all bv_trees. */
uint64_t brt_nentries;
brt_vdev_t *brt_vdevs;
uint64_t brt_nvdevs;
} brt_t;
/* Size of bre_offset / sizeof (uint64_t). */
#define BRT_KEY_WORDS (1)
/*
* In-core brt entry.
* On-disk we use bre_offset as the key and bre_refcount as the value.
*/
typedef struct brt_entry {
uint64_t bre_offset;
uint64_t bre_refcount;
avl_node_t bre_node;
} brt_entry_t;
typedef struct brt_pending_entry {
blkptr_t bpe_bp;
int bpe_count;
avl_node_t bpe_node;
} brt_pending_entry_t;
static kmem_cache_t *brt_entry_cache;
static kmem_cache_t *brt_pending_entry_cache;
/*
* Enable/disable prefetching of BRT entries that we are going to modify.
*/
int zfs_brt_prefetch = 1;
#ifdef ZFS_DEBUG
#define BRT_DEBUG(...) do { \
if ((zfs_flags & ZFS_DEBUG_BRT) != 0) { \
__dprintf(B_TRUE, __FILE__, __func__, __LINE__, __VA_ARGS__); \
} \
} while (0)
#else
#define BRT_DEBUG(...) do { } while (0)
#endif
int brt_zap_leaf_blockshift = 12;
int brt_zap_indirect_blockshift = 12;
static kstat_t *brt_ksp;
typedef struct brt_stats {
kstat_named_t brt_addref_entry_in_memory;
kstat_named_t brt_addref_entry_not_on_disk;
kstat_named_t brt_addref_entry_on_disk;
kstat_named_t brt_addref_entry_read_lost_race;
kstat_named_t brt_decref_entry_in_memory;
kstat_named_t brt_decref_entry_loaded_from_disk;
kstat_named_t brt_decref_entry_not_in_memory;
kstat_named_t brt_decref_entry_not_on_disk;
kstat_named_t brt_decref_entry_read_lost_race;
kstat_named_t brt_decref_entry_still_referenced;
kstat_named_t brt_decref_free_data_later;
kstat_named_t brt_decref_free_data_now;
kstat_named_t brt_decref_no_entry;
} brt_stats_t;
static brt_stats_t brt_stats = {
{ "addref_entry_in_memory", KSTAT_DATA_UINT64 },
{ "addref_entry_not_on_disk", KSTAT_DATA_UINT64 },
{ "addref_entry_on_disk", KSTAT_DATA_UINT64 },
{ "addref_entry_read_lost_race", KSTAT_DATA_UINT64 },
{ "decref_entry_in_memory", KSTAT_DATA_UINT64 },
{ "decref_entry_loaded_from_disk", KSTAT_DATA_UINT64 },
{ "decref_entry_not_in_memory", KSTAT_DATA_UINT64 },
{ "decref_entry_not_on_disk", KSTAT_DATA_UINT64 },
{ "decref_entry_read_lost_race", KSTAT_DATA_UINT64 },
{ "decref_entry_still_referenced", KSTAT_DATA_UINT64 },
{ "decref_free_data_later", KSTAT_DATA_UINT64 },
{ "decref_free_data_now", KSTAT_DATA_UINT64 },
{ "decref_no_entry", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t brt_addref_entry_in_memory;
wmsum_t brt_addref_entry_not_on_disk;
wmsum_t brt_addref_entry_on_disk;
wmsum_t brt_addref_entry_read_lost_race;
wmsum_t brt_decref_entry_in_memory;
wmsum_t brt_decref_entry_loaded_from_disk;
wmsum_t brt_decref_entry_not_in_memory;
wmsum_t brt_decref_entry_not_on_disk;
wmsum_t brt_decref_entry_read_lost_race;
wmsum_t brt_decref_entry_still_referenced;
wmsum_t brt_decref_free_data_later;
wmsum_t brt_decref_free_data_now;
wmsum_t brt_decref_no_entry;
} brt_sums;
#define BRTSTAT_BUMP(stat) wmsum_add(&brt_sums.stat, 1)
static int brt_entry_compare(const void *x1, const void *x2);
static int brt_pending_entry_compare(const void *x1, const void *x2);
static void
brt_rlock(brt_t *brt)
{
rw_enter(&brt->brt_lock, RW_READER);
}
static void
brt_wlock(brt_t *brt)
{
rw_enter(&brt->brt_lock, RW_WRITER);
}
static void
brt_unlock(brt_t *brt)
{
rw_exit(&brt->brt_lock);
}
static uint16_t
brt_vdev_entcount_get(const brt_vdev_t *brtvd, uint64_t idx)
{
ASSERT3U(idx, <, brtvd->bv_size);
if (brtvd->bv_need_byteswap) {
return (BSWAP_16(brtvd->bv_entcount[idx]));
} else {
return (brtvd->bv_entcount[idx]);
}
}
static void
brt_vdev_entcount_set(brt_vdev_t *brtvd, uint64_t idx, uint16_t entcnt)
{
ASSERT3U(idx, <, brtvd->bv_size);
if (brtvd->bv_need_byteswap) {
brtvd->bv_entcount[idx] = BSWAP_16(entcnt);
} else {
brtvd->bv_entcount[idx] = entcnt;
}
}
static void
brt_vdev_entcount_inc(brt_vdev_t *brtvd, uint64_t idx)
{
uint16_t entcnt;
ASSERT3U(idx, <, brtvd->bv_size);
entcnt = brt_vdev_entcount_get(brtvd, idx);
ASSERT(entcnt < UINT16_MAX);
brt_vdev_entcount_set(brtvd, idx, entcnt + 1);
}
static void
brt_vdev_entcount_dec(brt_vdev_t *brtvd, uint64_t idx)
{
uint16_t entcnt;
ASSERT3U(idx, <, brtvd->bv_size);
entcnt = brt_vdev_entcount_get(brtvd, idx);
ASSERT(entcnt > 0);
brt_vdev_entcount_set(brtvd, idx, entcnt - 1);
}
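/*
 * The accessors above keep bv_entcount[] in its on-disk byte order and
 * swap on every access instead of swapping the whole array once at load
 * time. A standalone sketch of the same pattern (bswap16() here is a
 * hypothetical stand-in for the BSWAP_16 macro; not part of brt.c):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint16_t
 *	bswap16(uint16_t v)
 *	{
 *		return ((uint16_t)((v << 8) | (v >> 8)));
 *	}
 *
 *	static uint16_t array[4];
 *	static int need_byteswap = 1;	// pretend foreign-endian storage
 *
 *	static uint16_t
 *	entcount_get(uint64_t idx)
 *	{
 *		return (need_byteswap ? bswap16(array[idx]) : array[idx]);
 *	}
 *
 *	static void
 *	entcount_set(uint64_t idx, uint16_t v)
 *	{
 *		array[idx] = need_byteswap ? bswap16(v) : v;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		entcount_set(1, 260);		// 0x0104
 *		printf("raw=0x%04x logical=%u\n", array[1], entcount_get(1));
 *		// prints "raw=0x0401 logical=260": the raw storage stays
 *		// swapped, but reads always see the logical value.
 *		return (0);
 *	}
 */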
#ifdef ZFS_DEBUG
static void
brt_vdev_dump(brt_t *brt)
{
brt_vdev_t *brtvd;
uint64_t vdevid;
if ((zfs_flags & ZFS_DEBUG_BRT) == 0) {
return;
}
if (brt->brt_nvdevs == 0) {
zfs_dbgmsg("BRT empty");
return;
}
zfs_dbgmsg("BRT vdev dump:");
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
uint64_t idx;
brtvd = &brt->brt_vdevs[vdevid];
zfs_dbgmsg(" vdevid=%llu/%llu meta_dirty=%d entcount_dirty=%d "
"size=%llu totalcount=%llu nblocks=%llu bitmapsize=%zu\n",
(u_longlong_t)vdevid, (u_longlong_t)brtvd->bv_vdevid,
brtvd->bv_meta_dirty, brtvd->bv_entcount_dirty,
(u_longlong_t)brtvd->bv_size,
(u_longlong_t)brtvd->bv_totalcount,
(u_longlong_t)brtvd->bv_nblocks,
(size_t)BT_SIZEOFMAP(brtvd->bv_nblocks));
if (brtvd->bv_totalcount > 0) {
zfs_dbgmsg(" entcounts:");
for (idx = 0; idx < brtvd->bv_size; idx++) {
if (brt_vdev_entcount_get(brtvd, idx) > 0) {
zfs_dbgmsg(" [%04llu] %hu",
(u_longlong_t)idx,
brt_vdev_entcount_get(brtvd, idx));
}
}
}
if (brtvd->bv_entcount_dirty) {
char *bitmap;
bitmap = kmem_alloc(brtvd->bv_nblocks + 1, KM_SLEEP);
for (idx = 0; idx < brtvd->bv_nblocks; idx++) {
bitmap[idx] =
BT_TEST(brtvd->bv_bitmap, idx) ? 'x' : '.';
}
bitmap[idx] = '\0';
zfs_dbgmsg(" bitmap: %s", bitmap);
kmem_free(bitmap, brtvd->bv_nblocks + 1);
}
}
}
#endif
static brt_vdev_t *
brt_vdev(brt_t *brt, uint64_t vdevid)
{
brt_vdev_t *brtvd;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
if (vdevid < brt->brt_nvdevs) {
brtvd = &brt->brt_vdevs[vdevid];
} else {
brtvd = NULL;
}
return (brtvd);
}
static void
brt_vdev_create(brt_t *brt, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
char name[64];
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT0(brtvd->bv_mos_brtvdev);
ASSERT0(brtvd->bv_mos_entries);
ASSERT(brtvd->bv_entcount != NULL);
ASSERT(brtvd->bv_size > 0);
ASSERT(brtvd->bv_bitmap != NULL);
ASSERT(brtvd->bv_nblocks > 0);
brtvd->bv_mos_entries = zap_create_flags(brt->brt_mos, 0,
ZAP_FLAG_HASH64 | ZAP_FLAG_UINT64_KEY, DMU_OTN_ZAP_METADATA,
brt_zap_leaf_blockshift, brt_zap_indirect_blockshift, DMU_OT_NONE,
0, tx);
VERIFY(brtvd->bv_mos_entries != 0);
BRT_DEBUG("MOS entries created, object=%llu",
(u_longlong_t)brtvd->bv_mos_entries);
/*
* We allocate DMU buffer to store the bv_entcount[] array.
* We will keep the array size (bv_size) and the cumulative count for all
* bv_entcount[]s (bv_totalcount) in the bonus buffer.
*/
brtvd->bv_mos_brtvdev = dmu_object_alloc(brt->brt_mos,
DMU_OTN_UINT64_METADATA, BRT_BLOCKSIZE,
DMU_OTN_UINT64_METADATA, sizeof (brt_vdev_phys_t), tx);
VERIFY(brtvd->bv_mos_brtvdev != 0);
BRT_DEBUG("MOS BRT VDEV created, object=%llu",
(u_longlong_t)brtvd->bv_mos_brtvdev);
snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
(u_longlong_t)brtvd->bv_vdevid);
VERIFY0(zap_add(brt->brt_mos, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev, tx));
BRT_DEBUG("Pool directory object created, object=%s", name);
spa_feature_incr(brt->brt_spa, SPA_FEATURE_BLOCK_CLONING, tx);
}
static void
brt_vdev_realloc(brt_t *brt, brt_vdev_t *brtvd)
{
vdev_t *vd;
uint16_t *entcount;
ulong_t *bitmap;
uint64_t nblocks, size;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
spa_config_enter(brt->brt_spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(brt->brt_spa, brtvd->bv_vdevid);
size = (vdev_get_min_asize(vd) - 1) / brt->brt_rangesize + 1;
spa_config_exit(brt->brt_spa, SCL_VDEV, FTAG);
- entcount = kmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP);
+ entcount = vmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP);
nblocks = BRT_RANGESIZE_TO_NBLOCKS(size);
bitmap = kmem_zalloc(BT_SIZEOFMAP(nblocks), KM_SLEEP);
if (!brtvd->bv_initiated) {
ASSERT0(brtvd->bv_size);
ASSERT(brtvd->bv_entcount == NULL);
ASSERT(brtvd->bv_bitmap == NULL);
ASSERT0(brtvd->bv_nblocks);
avl_create(&brtvd->bv_tree, brt_entry_compare,
sizeof (brt_entry_t), offsetof(brt_entry_t, bre_node));
} else {
ASSERT(brtvd->bv_size > 0);
ASSERT(brtvd->bv_entcount != NULL);
ASSERT(brtvd->bv_bitmap != NULL);
ASSERT(brtvd->bv_nblocks > 0);
/*
* TODO: Allow vdev shrinking. We only need to implement
* shrinking the on-disk BRT VDEV object.
* dmu_free_range(brt->brt_mos, brtvd->bv_mos_brtvdev, offset,
* size, tx);
*/
ASSERT3U(brtvd->bv_size, <=, size);
memcpy(entcount, brtvd->bv_entcount,
sizeof (entcount[0]) * MIN(size, brtvd->bv_size));
memcpy(bitmap, brtvd->bv_bitmap, MIN(BT_SIZEOFMAP(nblocks),
BT_SIZEOFMAP(brtvd->bv_nblocks)));
- kmem_free(brtvd->bv_entcount,
+ vmem_free(brtvd->bv_entcount,
sizeof (entcount[0]) * brtvd->bv_size);
kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(brtvd->bv_nblocks));
}
brtvd->bv_size = size;
brtvd->bv_entcount = entcount;
brtvd->bv_bitmap = bitmap;
brtvd->bv_nblocks = nblocks;
if (!brtvd->bv_initiated) {
brtvd->bv_need_byteswap = FALSE;
brtvd->bv_initiated = TRUE;
BRT_DEBUG("BRT VDEV %llu initiated.",
(u_longlong_t)brtvd->bv_vdevid);
}
}
static void
brt_vdev_load(brt_t *brt, brt_vdev_t *brtvd)
{
char name[64];
dmu_buf_t *db;
brt_vdev_phys_t *bvphys;
int error;
snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
(u_longlong_t)brtvd->bv_vdevid);
error = zap_lookup(brt->brt_mos, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev);
if (error != 0)
return;
ASSERT(brtvd->bv_mos_brtvdev != 0);
error = dmu_bonus_hold(brt->brt_mos, brtvd->bv_mos_brtvdev, FTAG, &db);
ASSERT0(error);
if (error != 0)
return;
bvphys = db->db_data;
if (brt->brt_rangesize == 0) {
brt->brt_rangesize = bvphys->bvp_rangesize;
} else {
ASSERT3U(brt->brt_rangesize, ==, bvphys->bvp_rangesize);
}
ASSERT(!brtvd->bv_initiated);
brt_vdev_realloc(brt, brtvd);
/* TODO: We don't support VDEV shrinking. */
ASSERT3U(bvphys->bvp_size, <=, brtvd->bv_size);
/*
* If VDEV grew, we will leave new bv_entcount[] entries zeroed out.
*/
error = dmu_read(brt->brt_mos, brtvd->bv_mos_brtvdev, 0,
MIN(brtvd->bv_size, bvphys->bvp_size) * sizeof (uint16_t),
brtvd->bv_entcount, DMU_READ_NO_PREFETCH);
ASSERT0(error);
brtvd->bv_mos_entries = bvphys->bvp_mos_entries;
ASSERT(brtvd->bv_mos_entries != 0);
brtvd->bv_need_byteswap =
(bvphys->bvp_byteorder != BRT_NATIVE_BYTEORDER);
brtvd->bv_totalcount = bvphys->bvp_totalcount;
brtvd->bv_usedspace = bvphys->bvp_usedspace;
brtvd->bv_savedspace = bvphys->bvp_savedspace;
brt->brt_usedspace += brtvd->bv_usedspace;
brt->brt_savedspace += brtvd->bv_savedspace;
dmu_buf_rele(db, FTAG);
BRT_DEBUG("MOS BRT VDEV %s loaded: mos_brtvdev=%llu, mos_entries=%llu",
name, (u_longlong_t)brtvd->bv_mos_brtvdev,
(u_longlong_t)brtvd->bv_mos_entries);
}
static void
brt_vdev_dealloc(brt_t *brt, brt_vdev_t *brtvd)
{
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_initiated);
- kmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
+ vmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
brtvd->bv_entcount = NULL;
kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(brtvd->bv_nblocks));
brtvd->bv_bitmap = NULL;
ASSERT0(avl_numnodes(&brtvd->bv_tree));
avl_destroy(&brtvd->bv_tree);
brtvd->bv_size = 0;
brtvd->bv_nblocks = 0;
brtvd->bv_initiated = FALSE;
BRT_DEBUG("BRT VDEV %llu deallocated.", (u_longlong_t)brtvd->bv_vdevid);
}
static void
brt_vdev_destroy(brt_t *brt, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
char name[64];
uint64_t count;
dmu_buf_t *db;
brt_vdev_phys_t *bvphys;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_brtvdev != 0);
ASSERT(brtvd->bv_mos_entries != 0);
VERIFY0(zap_count(brt->brt_mos, brtvd->bv_mos_entries, &count));
VERIFY0(count);
VERIFY0(zap_destroy(brt->brt_mos, brtvd->bv_mos_entries, tx));
BRT_DEBUG("MOS entries destroyed, object=%llu",
(u_longlong_t)brtvd->bv_mos_entries);
brtvd->bv_mos_entries = 0;
VERIFY0(dmu_bonus_hold(brt->brt_mos, brtvd->bv_mos_brtvdev, FTAG, &db));
bvphys = db->db_data;
ASSERT0(bvphys->bvp_totalcount);
ASSERT0(bvphys->bvp_usedspace);
ASSERT0(bvphys->bvp_savedspace);
dmu_buf_rele(db, FTAG);
VERIFY0(dmu_object_free(brt->brt_mos, brtvd->bv_mos_brtvdev, tx));
BRT_DEBUG("MOS BRT VDEV destroyed, object=%llu",
(u_longlong_t)brtvd->bv_mos_brtvdev);
brtvd->bv_mos_brtvdev = 0;
snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
(u_longlong_t)brtvd->bv_vdevid);
VERIFY0(zap_remove(brt->brt_mos, DMU_POOL_DIRECTORY_OBJECT, name, tx));
BRT_DEBUG("Pool directory object removed, object=%s", name);
brt_vdev_dealloc(brt, brtvd);
spa_feature_decr(brt->brt_spa, SPA_FEATURE_BLOCK_CLONING, tx);
}
static void
brt_vdevs_expand(brt_t *brt, uint64_t nvdevs)
{
brt_vdev_t *brtvd, *vdevs;
uint64_t vdevid;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT3U(nvdevs, >, brt->brt_nvdevs);
vdevs = kmem_zalloc(sizeof (vdevs[0]) * nvdevs, KM_SLEEP);
if (brt->brt_nvdevs > 0) {
ASSERT(brt->brt_vdevs != NULL);
memcpy(vdevs, brt->brt_vdevs,
sizeof (brt_vdev_t) * brt->brt_nvdevs);
kmem_free(brt->brt_vdevs,
sizeof (brt_vdev_t) * brt->brt_nvdevs);
}
for (vdevid = brt->brt_nvdevs; vdevid < nvdevs; vdevid++) {
brtvd = &vdevs[vdevid];
brtvd->bv_vdevid = vdevid;
brtvd->bv_initiated = FALSE;
}
BRT_DEBUG("BRT VDEVs expanded from %llu to %llu.",
(u_longlong_t)brt->brt_nvdevs, (u_longlong_t)nvdevs);
brt->brt_vdevs = vdevs;
brt->brt_nvdevs = nvdevs;
}
static boolean_t
brt_vdev_lookup(brt_t *brt, brt_vdev_t *brtvd, const brt_entry_t *bre)
{
uint64_t idx;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
idx = bre->bre_offset / brt->brt_rangesize;
if (brtvd->bv_entcount != NULL && idx < brtvd->bv_size) {
/* VDEV wasn't expanded. */
return (brt_vdev_entcount_get(brtvd, idx) > 0);
}
return (FALSE);
}
static void
brt_vdev_addref(brt_t *brt, brt_vdev_t *brtvd, const brt_entry_t *bre,
uint64_t dsize)
{
uint64_t idx;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
ASSERT(brtvd != NULL);
ASSERT(brtvd->bv_entcount != NULL);
brt->brt_savedspace += dsize;
brtvd->bv_savedspace += dsize;
brtvd->bv_meta_dirty = TRUE;
if (bre->bre_refcount > 1) {
return;
}
brt->brt_usedspace += dsize;
brtvd->bv_usedspace += dsize;
idx = bre->bre_offset / brt->brt_rangesize;
if (idx >= brtvd->bv_size) {
/* VDEV has been expanded. */
brt_vdev_realloc(brt, brtvd);
}
ASSERT3U(idx, <, brtvd->bv_size);
brtvd->bv_totalcount++;
brt_vdev_entcount_inc(brtvd, idx);
brtvd->bv_entcount_dirty = TRUE;
idx = idx / BRT_BLOCKSIZE / 8;
BT_SET(brtvd->bv_bitmap, idx);
#ifdef ZFS_DEBUG
brt_vdev_dump(brt);
#endif
}
static void
brt_vdev_decref(brt_t *brt, brt_vdev_t *brtvd, const brt_entry_t *bre,
uint64_t dsize)
{
uint64_t idx;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd != NULL);
ASSERT(brtvd->bv_entcount != NULL);
brt->brt_savedspace -= dsize;
brtvd->bv_savedspace -= dsize;
brtvd->bv_meta_dirty = TRUE;
if (bre->bre_refcount > 0) {
return;
}
brt->brt_usedspace -= dsize;
brtvd->bv_usedspace -= dsize;
idx = bre->bre_offset / brt->brt_rangesize;
ASSERT3U(idx, <, brtvd->bv_size);
ASSERT(brtvd->bv_totalcount > 0);
brtvd->bv_totalcount--;
brt_vdev_entcount_dec(brtvd, idx);
brtvd->bv_entcount_dirty = TRUE;
idx = idx / BRT_BLOCKSIZE / 8;
BT_SET(brtvd->bv_bitmap, idx);
#ifdef ZFS_DEBUG
brt_vdev_dump(brt);
#endif
}
static void
brt_vdev_sync(brt_t *brt, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
dmu_buf_t *db;
brt_vdev_phys_t *bvphys;
ASSERT(brtvd->bv_meta_dirty);
ASSERT(brtvd->bv_mos_brtvdev != 0);
ASSERT(dmu_tx_is_syncing(tx));
VERIFY0(dmu_bonus_hold(brt->brt_mos, brtvd->bv_mos_brtvdev, FTAG, &db));
if (brtvd->bv_entcount_dirty) {
/*
* TODO: Walk brtvd->bv_bitmap and write only the dirty blocks.
*/
dmu_write(brt->brt_mos, brtvd->bv_mos_brtvdev, 0,
brtvd->bv_size * sizeof (brtvd->bv_entcount[0]),
brtvd->bv_entcount, tx);
memset(brtvd->bv_bitmap, 0, BT_SIZEOFMAP(brtvd->bv_nblocks));
brtvd->bv_entcount_dirty = FALSE;
}
dmu_buf_will_dirty(db, tx);
bvphys = db->db_data;
bvphys->bvp_mos_entries = brtvd->bv_mos_entries;
bvphys->bvp_size = brtvd->bv_size;
if (brtvd->bv_need_byteswap) {
bvphys->bvp_byteorder = BRT_NON_NATIVE_BYTEORDER;
} else {
bvphys->bvp_byteorder = BRT_NATIVE_BYTEORDER;
}
bvphys->bvp_totalcount = brtvd->bv_totalcount;
bvphys->bvp_rangesize = brt->brt_rangesize;
bvphys->bvp_usedspace = brtvd->bv_usedspace;
bvphys->bvp_savedspace = brtvd->bv_savedspace;
dmu_buf_rele(db, FTAG);
brtvd->bv_meta_dirty = FALSE;
}
static void
brt_vdevs_alloc(brt_t *brt, boolean_t load)
{
brt_vdev_t *brtvd;
uint64_t vdevid;
brt_wlock(brt);
brt_vdevs_expand(brt, brt->brt_spa->spa_root_vdev->vdev_children);
if (load) {
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brtvd = &brt->brt_vdevs[vdevid];
ASSERT(brtvd->bv_entcount == NULL);
brt_vdev_load(brt, brtvd);
}
}
if (brt->brt_rangesize == 0) {
brt->brt_rangesize = BRT_RANGESIZE;
}
brt_unlock(brt);
}
static void
brt_vdevs_free(brt_t *brt)
{
brt_vdev_t *brtvd;
uint64_t vdevid;
brt_wlock(brt);
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brtvd = &brt->brt_vdevs[vdevid];
if (brtvd->bv_initiated)
brt_vdev_dealloc(brt, brtvd);
}
kmem_free(brt->brt_vdevs, sizeof (brt_vdev_t) * brt->brt_nvdevs);
brt_unlock(brt);
}
static void
brt_entry_fill(const blkptr_t *bp, brt_entry_t *bre, uint64_t *vdevidp)
{
bre->bre_offset = DVA_GET_OFFSET(&bp->blk_dva[0]);
bre->bre_refcount = 0;
*vdevidp = DVA_GET_VDEV(&bp->blk_dva[0]);
}
static int
brt_entry_compare(const void *x1, const void *x2)
{
const brt_entry_t *bre1 = x1;
const brt_entry_t *bre2 = x2;
return (TREE_CMP(bre1->bre_offset, bre2->bre_offset));
}
static int
brt_entry_lookup(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre)
{
uint64_t mos_entries;
uint64_t one, physsize;
int error;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
if (!brt_vdev_lookup(brt, brtvd, bre))
return (SET_ERROR(ENOENT));
/*
* Remember mos_entries object number. After we reacquire the BRT lock,
* the brtvd pointer may be invalid.
*/
mos_entries = brtvd->bv_mos_entries;
if (mos_entries == 0)
return (SET_ERROR(ENOENT));
brt_unlock(brt);
error = zap_length_uint64(brt->brt_mos, mos_entries, &bre->bre_offset,
BRT_KEY_WORDS, &one, &physsize);
if (error == 0) {
ASSERT3U(one, ==, 1);
ASSERT3U(physsize, ==, sizeof (bre->bre_refcount));
error = zap_lookup_uint64(brt->brt_mos, mos_entries,
&bre->bre_offset, BRT_KEY_WORDS, 1,
sizeof (bre->bre_refcount), &bre->bre_refcount);
BRT_DEBUG("ZAP lookup: object=%llu vdev=%llu offset=%llu "
"count=%llu error=%d", (u_longlong_t)mos_entries,
(u_longlong_t)brtvd->bv_vdevid,
(u_longlong_t)bre->bre_offset,
error == 0 ? (u_longlong_t)bre->bre_refcount : 0, error);
}
brt_wlock(brt);
return (error);
}
static void
brt_entry_prefetch(brt_t *brt, uint64_t vdevid, brt_entry_t *bre)
{
brt_vdev_t *brtvd;
uint64_t mos_entries = 0;
brt_rlock(brt);
brtvd = brt_vdev(brt, vdevid);
if (brtvd != NULL)
mos_entries = brtvd->bv_mos_entries;
brt_unlock(brt);
if (mos_entries == 0)
return;
BRT_DEBUG("ZAP prefetch: object=%llu vdev=%llu offset=%llu",
(u_longlong_t)mos_entries, (u_longlong_t)vdevid,
(u_longlong_t)bre->bre_offset);
(void) zap_prefetch_uint64(brt->brt_mos, mos_entries,
(uint64_t *)&bre->bre_offset, BRT_KEY_WORDS);
}
static int
brt_entry_update(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre, dmu_tx_t *tx)
{
int error;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_entries != 0);
ASSERT(bre->bre_refcount > 0);
error = zap_update_uint64(brt->brt_mos, brtvd->bv_mos_entries,
(uint64_t *)&bre->bre_offset, BRT_KEY_WORDS, 1,
sizeof (bre->bre_refcount), &bre->bre_refcount, tx);
BRT_DEBUG("ZAP update: object=%llu vdev=%llu offset=%llu count=%llu "
"error=%d", (u_longlong_t)brtvd->bv_mos_entries,
(u_longlong_t)brtvd->bv_vdevid, (u_longlong_t)bre->bre_offset,
(u_longlong_t)bre->bre_refcount, error);
return (error);
}
static int
brt_entry_remove(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre, dmu_tx_t *tx)
{
int error;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_entries != 0);
ASSERT0(bre->bre_refcount);
error = zap_remove_uint64(brt->brt_mos, brtvd->bv_mos_entries,
(uint64_t *)&bre->bre_offset, BRT_KEY_WORDS, tx);
BRT_DEBUG("ZAP remove: object=%llu vdev=%llu offset=%llu count=%llu "
"error=%d", (u_longlong_t)brtvd->bv_mos_entries,
(u_longlong_t)brtvd->bv_vdevid, (u_longlong_t)bre->bre_offset,
(u_longlong_t)bre->bre_refcount, error);
return (error);
}
/*
* Return TRUE if we _can_ have a BRT entry for this bp. It might be a false
* positive, but it gives us a quick answer to whether we should look into the
* BRT, which may require reads and thus will be more expensive.
*/
boolean_t
brt_maybe_exists(spa_t *spa, const blkptr_t *bp)
{
brt_t *brt = spa->spa_brt;
brt_vdev_t *brtvd;
brt_entry_t bre_search;
boolean_t mayexists = FALSE;
uint64_t vdevid;
brt_entry_fill(bp, &bre_search, &vdevid);
brt_rlock(brt);
brtvd = brt_vdev(brt, vdevid);
if (brtvd != NULL && brtvd->bv_initiated) {
if (!avl_is_empty(&brtvd->bv_tree) ||
brt_vdev_lookup(brt, brtvd, &bre_search)) {
mayexists = TRUE;
}
}
brt_unlock(brt);
return (mayexists);
}
uint64_t
brt_get_dspace(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return (0);
return (brt->brt_savedspace);
}
uint64_t
brt_get_used(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return (0);
return (brt->brt_usedspace);
}
uint64_t
brt_get_saved(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return (0);
return (brt->brt_savedspace);
}
uint64_t
brt_get_ratio(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt->brt_usedspace == 0)
return (100);
return ((brt->brt_usedspace + brt->brt_savedspace) * 100 /
brt->brt_usedspace);
}
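/*
 * brt_get_ratio() reports a percentage: 100 means no savings, 400 means
 * the cloned data is logically four times its physical size. A quick
 * standalone check of the arithmetic (made-up values, not part of brt.c):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t used = 100, saved = 300;	// e.g. in GB
 *
 *		printf("ratio=%ju\n", used == 0 ? (uintmax_t)100 :
 *		    (uintmax_t)((used + saved) * 100 / used));
 *		// prints "ratio=400", i.e. a 4.00x cloning ratio
 *		return (0);
 *	}
 */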
static int
brt_kstats_update(kstat_t *ksp, int rw)
{
brt_stats_t *bs = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
bs->brt_addref_entry_in_memory.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_in_memory);
bs->brt_addref_entry_not_on_disk.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_not_on_disk);
bs->brt_addref_entry_on_disk.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_on_disk);
bs->brt_addref_entry_read_lost_race.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_read_lost_race);
bs->brt_decref_entry_in_memory.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_in_memory);
bs->brt_decref_entry_loaded_from_disk.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_loaded_from_disk);
bs->brt_decref_entry_not_in_memory.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_not_in_memory);
bs->brt_decref_entry_not_on_disk.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_not_on_disk);
bs->brt_decref_entry_read_lost_race.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_read_lost_race);
bs->brt_decref_entry_still_referenced.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_still_referenced);
bs->brt_decref_free_data_later.value.ui64 =
wmsum_value(&brt_sums.brt_decref_free_data_later);
bs->brt_decref_free_data_now.value.ui64 =
wmsum_value(&brt_sums.brt_decref_free_data_now);
bs->brt_decref_no_entry.value.ui64 =
wmsum_value(&brt_sums.brt_decref_no_entry);
return (0);
}
static void
brt_stat_init(void)
{
wmsum_init(&brt_sums.brt_addref_entry_in_memory, 0);
wmsum_init(&brt_sums.brt_addref_entry_not_on_disk, 0);
wmsum_init(&brt_sums.brt_addref_entry_on_disk, 0);
wmsum_init(&brt_sums.brt_addref_entry_read_lost_race, 0);
wmsum_init(&brt_sums.brt_decref_entry_in_memory, 0);
wmsum_init(&brt_sums.brt_decref_entry_loaded_from_disk, 0);
wmsum_init(&brt_sums.brt_decref_entry_not_in_memory, 0);
wmsum_init(&brt_sums.brt_decref_entry_not_on_disk, 0);
wmsum_init(&brt_sums.brt_decref_entry_read_lost_race, 0);
wmsum_init(&brt_sums.brt_decref_entry_still_referenced, 0);
wmsum_init(&brt_sums.brt_decref_free_data_later, 0);
wmsum_init(&brt_sums.brt_decref_free_data_now, 0);
wmsum_init(&brt_sums.brt_decref_no_entry, 0);
brt_ksp = kstat_create("zfs", 0, "brtstats", "misc", KSTAT_TYPE_NAMED,
sizeof (brt_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (brt_ksp != NULL) {
brt_ksp->ks_data = &brt_stats;
brt_ksp->ks_update = brt_kstats_update;
kstat_install(brt_ksp);
}
}
static void
brt_stat_fini(void)
{
if (brt_ksp != NULL) {
kstat_delete(brt_ksp);
brt_ksp = NULL;
}
wmsum_fini(&brt_sums.brt_addref_entry_in_memory);
wmsum_fini(&brt_sums.brt_addref_entry_not_on_disk);
wmsum_fini(&brt_sums.brt_addref_entry_on_disk);
wmsum_fini(&brt_sums.brt_addref_entry_read_lost_race);
wmsum_fini(&brt_sums.brt_decref_entry_in_memory);
wmsum_fini(&brt_sums.brt_decref_entry_loaded_from_disk);
wmsum_fini(&brt_sums.brt_decref_entry_not_in_memory);
wmsum_fini(&brt_sums.brt_decref_entry_not_on_disk);
wmsum_fini(&brt_sums.brt_decref_entry_read_lost_race);
wmsum_fini(&brt_sums.brt_decref_entry_still_referenced);
wmsum_fini(&brt_sums.brt_decref_free_data_later);
wmsum_fini(&brt_sums.brt_decref_free_data_now);
wmsum_fini(&brt_sums.brt_decref_no_entry);
}
void
brt_init(void)
{
brt_entry_cache = kmem_cache_create("brt_entry_cache",
sizeof (brt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
brt_pending_entry_cache = kmem_cache_create("brt_pending_entry_cache",
sizeof (brt_pending_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
brt_stat_init();
}
void
brt_fini(void)
{
brt_stat_fini();
kmem_cache_destroy(brt_entry_cache);
kmem_cache_destroy(brt_pending_entry_cache);
}
static brt_entry_t *
brt_entry_alloc(const brt_entry_t *bre_init)
{
brt_entry_t *bre;
bre = kmem_cache_alloc(brt_entry_cache, KM_SLEEP);
bre->bre_offset = bre_init->bre_offset;
bre->bre_refcount = bre_init->bre_refcount;
return (bre);
}
static void
brt_entry_free(brt_entry_t *bre)
{
kmem_cache_free(brt_entry_cache, bre);
}
static void
brt_entry_addref(brt_t *brt, const blkptr_t *bp)
{
brt_vdev_t *brtvd;
brt_entry_t *bre, *racebre;
brt_entry_t bre_search;
avl_index_t where;
uint64_t vdevid;
int error;
ASSERT(!RW_WRITE_HELD(&brt->brt_lock));
brt_entry_fill(bp, &bre_search, &vdevid);
brt_wlock(brt);
brtvd = brt_vdev(brt, vdevid);
if (brtvd == NULL) {
ASSERT3U(vdevid, >=, brt->brt_nvdevs);
/* New VDEV was added. */
brt_vdevs_expand(brt, vdevid + 1);
brtvd = brt_vdev(brt, vdevid);
}
ASSERT(brtvd != NULL);
if (!brtvd->bv_initiated)
brt_vdev_realloc(brt, brtvd);
bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
if (bre != NULL) {
BRTSTAT_BUMP(brt_addref_entry_in_memory);
} else {
/*
* brt_entry_lookup() may drop the BRT (read) lock and
* reacquire it (write).
*/
error = brt_entry_lookup(brt, brtvd, &bre_search);
/* bre_search now contains correct bre_refcount */
ASSERT(error == 0 || error == ENOENT);
if (error == 0)
BRTSTAT_BUMP(brt_addref_entry_on_disk);
else
BRTSTAT_BUMP(brt_addref_entry_not_on_disk);
/*
* When the BRT lock was dropped, brt_vdevs[] may have been
* expanded and reallocated, so we need to update the brtvd pointer.
*/
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
racebre = avl_find(&brtvd->bv_tree, &bre_search, &where);
if (racebre == NULL) {
bre = brt_entry_alloc(&bre_search);
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
avl_insert(&brtvd->bv_tree, bre, where);
brt->brt_nentries++;
} else {
/*
* The entry was added when the BRT lock was dropped in
* brt_entry_lookup().
*/
BRTSTAT_BUMP(brt_addref_entry_read_lost_race);
bre = racebre;
}
}
bre->bre_refcount++;
brt_vdev_addref(brt, brtvd, bre, bp_get_dsize(brt->brt_spa, bp));
brt_unlock(brt);
}
/* Return TRUE if block should be freed immediately. */
boolean_t
brt_entry_decref(spa_t *spa, const blkptr_t *bp)
{
brt_t *brt = spa->spa_brt;
brt_vdev_t *brtvd;
brt_entry_t *bre, *racebre;
brt_entry_t bre_search;
avl_index_t where;
uint64_t vdevid;
int error;
brt_entry_fill(bp, &bre_search, &vdevid);
brt_wlock(brt);
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
if (bre != NULL) {
BRTSTAT_BUMP(brt_decref_entry_in_memory);
goto out;
} else {
BRTSTAT_BUMP(brt_decref_entry_not_in_memory);
}
/*
* brt_entry_lookup() may drop the BRT lock and reacquire it.
*/
error = brt_entry_lookup(brt, brtvd, &bre_search);
/* bre_search now contains correct bre_refcount */
ASSERT(error == 0 || error == ENOENT);
/*
* When the BRT lock was dropped, brt_vdevs[] may have been expanded
* and reallocated, so we need to update the brtvd pointer.
*/
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
if (error == ENOENT) {
BRTSTAT_BUMP(brt_decref_entry_not_on_disk);
bre = NULL;
goto out;
}
racebre = avl_find(&brtvd->bv_tree, &bre_search, &where);
if (racebre != NULL) {
/*
* The entry was added when the BRT lock was dropped in
* brt_entry_lookup().
*/
BRTSTAT_BUMP(brt_decref_entry_read_lost_race);
bre = racebre;
goto out;
}
BRTSTAT_BUMP(brt_decref_entry_loaded_from_disk);
bre = brt_entry_alloc(&bre_search);
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
avl_insert(&brtvd->bv_tree, bre, where);
brt->brt_nentries++;
out:
if (bre == NULL) {
/*
* This is a free of a regular (not cloned) block.
*/
brt_unlock(brt);
BRTSTAT_BUMP(brt_decref_no_entry);
return (B_TRUE);
}
if (bre->bre_refcount == 0) {
brt_unlock(brt);
BRTSTAT_BUMP(brt_decref_free_data_now);
return (B_TRUE);
}
ASSERT(bre->bre_refcount > 0);
bre->bre_refcount--;
if (bre->bre_refcount == 0)
BRTSTAT_BUMP(brt_decref_free_data_later);
else
BRTSTAT_BUMP(brt_decref_entry_still_referenced);
brt_vdev_decref(brt, brtvd, bre, bp_get_dsize(brt->brt_spa, bp));
brt_unlock(brt);
return (B_FALSE);
}
+uint64_t
+brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp)
+{
+ brt_t *brt = spa->spa_brt;
+ brt_vdev_t *brtvd;
+ brt_entry_t bre_search, *bre;
+ uint64_t vdevid, refcnt;
+ int error;
+
+ brt_entry_fill(bp, &bre_search, &vdevid);
+
+ brt_rlock(brt);
+
+ brtvd = brt_vdev(brt, vdevid);
+ ASSERT(brtvd != NULL);
+
+ bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
+ if (bre == NULL) {
+ error = brt_entry_lookup(brt, brtvd, &bre_search);
+ ASSERT(error == 0 || error == ENOENT);
+ if (error == ENOENT)
+ refcnt = 0;
+ else
+ refcnt = bre_search.bre_refcount;
+ } else
+ refcnt = bre->bre_refcount;
+
+ brt_unlock(brt);
+ return (refcnt);
+}
+
static void
brt_prefetch(brt_t *brt, const blkptr_t *bp)
{
brt_entry_t bre;
uint64_t vdevid;
ASSERT(bp != NULL);
if (!zfs_brt_prefetch)
return;
brt_entry_fill(bp, &bre, &vdevid);
brt_entry_prefetch(brt, vdevid, &bre);
}
static int
brt_pending_entry_compare(const void *x1, const void *x2)
{
const brt_pending_entry_t *bpe1 = x1, *bpe2 = x2;
const blkptr_t *bp1 = &bpe1->bpe_bp, *bp2 = &bpe2->bpe_bp;
int cmp;
cmp = TREE_CMP(BP_PHYSICAL_BIRTH(bp1), BP_PHYSICAL_BIRTH(bp2));
if (cmp == 0) {
cmp = TREE_CMP(DVA_GET_VDEV(&bp1->blk_dva[0]),
DVA_GET_VDEV(&bp2->blk_dva[0]));
if (cmp == 0) {
cmp = TREE_CMP(DVA_GET_OFFSET(&bp1->blk_dva[0]),
DVA_GET_OFFSET(&bp2->blk_dva[0]));
}
}
return (cmp);
}
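/*
 * The comparator above establishes a total order over pending entries by
 * comparing three keys in sequence - physical birth txg, then vdev, then
 * offset - falling through to the next key only on a tie. That is what
 * lets repeated clones of one block within a txg land on a single node.
 * A standalone sketch of the pattern (the struct is a made-up reduction
 * of the real key fields; not part of brt.c):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define CMP(a, b)	(((a) < (b)) ? -1 : ((a) > (b)) ? 1 : 0)
 *
 *	struct key { uint64_t birth, vdev, offset; };
 *
 *	static int
 *	key_compare(const struct key *k1, const struct key *k2)
 *	{
 *		int cmp = CMP(k1->birth, k2->birth);
 *		if (cmp == 0)
 *			cmp = CMP(k1->vdev, k2->vdev);
 *		if (cmp == 0)
 *			cmp = CMP(k1->offset, k2->offset);
 *		return (cmp);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct key a = { 7, 0, 4096 }, b = { 7, 0, 8192 };
 *		printf("%d %d\n", key_compare(&a, &b), key_compare(&a, &a));
 *		// prints "-1 0": equal births and vdevs, ordered by offset.
 *		return (0);
 *	}
 */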
void
brt_pending_add(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
{
brt_t *brt;
avl_tree_t *pending_tree;
kmutex_t *pending_lock;
brt_pending_entry_t *bpe, *newbpe;
avl_index_t where;
uint64_t txg;
brt = spa->spa_brt;
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
pending_tree = &brt->brt_pending_tree[txg & TXG_MASK];
pending_lock = &brt->brt_pending_lock[txg & TXG_MASK];
newbpe = kmem_cache_alloc(brt_pending_entry_cache, KM_SLEEP);
newbpe->bpe_bp = *bp;
newbpe->bpe_count = 1;
mutex_enter(pending_lock);
bpe = avl_find(pending_tree, newbpe, &where);
if (bpe == NULL) {
avl_insert(pending_tree, newbpe, where);
newbpe = NULL;
} else {
bpe->bpe_count++;
}
mutex_exit(pending_lock);
if (newbpe != NULL) {
ASSERT(bpe != NULL);
ASSERT(bpe != newbpe);
kmem_cache_free(brt_pending_entry_cache, newbpe);
} else {
ASSERT(bpe == NULL);
}
/* Prefetch BRT entry, as we will need it in the syncing context. */
brt_prefetch(brt, bp);
}
void
brt_pending_remove(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
{
brt_t *brt;
avl_tree_t *pending_tree;
kmutex_t *pending_lock;
brt_pending_entry_t *bpe, bpe_search;
uint64_t txg;
brt = spa->spa_brt;
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
pending_tree = &brt->brt_pending_tree[txg & TXG_MASK];
pending_lock = &brt->brt_pending_lock[txg & TXG_MASK];
bpe_search.bpe_bp = *bp;
mutex_enter(pending_lock);
bpe = avl_find(pending_tree, &bpe_search, NULL);
/* I believe we should always find bpe when this function is called. */
if (bpe != NULL) {
ASSERT(bpe->bpe_count > 0);
bpe->bpe_count--;
if (bpe->bpe_count == 0) {
avl_remove(pending_tree, bpe);
kmem_cache_free(brt_pending_entry_cache, bpe);
}
}
mutex_exit(pending_lock);
}
void
brt_pending_apply(spa_t *spa, uint64_t txg)
{
brt_t *brt;
brt_pending_entry_t *bpe;
avl_tree_t *pending_tree;
kmutex_t *pending_lock;
void *c;
ASSERT3U(txg, !=, 0);
brt = spa->spa_brt;
pending_tree = &brt->brt_pending_tree[txg & TXG_MASK];
pending_lock = &brt->brt_pending_lock[txg & TXG_MASK];
mutex_enter(pending_lock);
c = NULL;
while ((bpe = avl_destroy_nodes(pending_tree, &c)) != NULL) {
boolean_t added_to_ddt;
mutex_exit(pending_lock);
for (int i = 0; i < bpe->bpe_count; i++) {
/*
* If the block has the DEDUP bit set, it means that it
* already exists in the DEDUP table, so we can just
* use that instead of creating a new entry in
* the BRT table.
*/
if (BP_GET_DEDUP(&bpe->bpe_bp)) {
added_to_ddt = ddt_addref(spa, &bpe->bpe_bp);
} else {
added_to_ddt = B_FALSE;
}
if (!added_to_ddt)
brt_entry_addref(brt, &bpe->bpe_bp);
}
kmem_cache_free(brt_pending_entry_cache, bpe);
mutex_enter(pending_lock);
}
mutex_exit(pending_lock);
}
static void
brt_sync_entry(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre, dmu_tx_t *tx)
{
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_entries != 0);
if (bre->bre_refcount == 0) {
int error;
error = brt_entry_remove(brt, brtvd, bre, tx);
ASSERT(error == 0 || error == ENOENT);
/*
* If error == ENOENT then zfs_clone_range() was done from a
* removed (but opened) file (open(), unlink()).
*/
ASSERT(brt_entry_lookup(brt, brtvd, bre) == ENOENT);
} else {
VERIFY0(brt_entry_update(brt, brtvd, bre, tx));
}
}
static void
brt_sync_table(brt_t *brt, dmu_tx_t *tx)
{
brt_vdev_t *brtvd;
brt_entry_t *bre;
uint64_t vdevid;
void *c;
brt_wlock(brt);
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brtvd = &brt->brt_vdevs[vdevid];
if (!brtvd->bv_initiated)
continue;
if (!brtvd->bv_meta_dirty) {
ASSERT(!brtvd->bv_entcount_dirty);
ASSERT0(avl_numnodes(&brtvd->bv_tree));
continue;
}
ASSERT(!brtvd->bv_entcount_dirty ||
avl_numnodes(&brtvd->bv_tree) != 0);
if (brtvd->bv_mos_brtvdev == 0)
brt_vdev_create(brt, brtvd, tx);
c = NULL;
while ((bre = avl_destroy_nodes(&brtvd->bv_tree, &c)) != NULL) {
brt_sync_entry(brt, brtvd, bre, tx);
brt_entry_free(bre);
ASSERT(brt->brt_nentries > 0);
brt->brt_nentries--;
}
brt_vdev_sync(brt, brtvd, tx);
if (brtvd->bv_totalcount == 0)
brt_vdev_destroy(brt, brtvd, tx);
}
ASSERT0(brt->brt_nentries);
brt_unlock(brt);
}
void
brt_sync(spa_t *spa, uint64_t txg)
{
dmu_tx_t *tx;
brt_t *brt;
ASSERT(spa_syncing_txg(spa) == txg);
brt = spa->spa_brt;
brt_rlock(brt);
if (brt->brt_nentries == 0) {
/* No changes. */
brt_unlock(brt);
return;
}
brt_unlock(brt);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
brt_sync_table(brt, tx);
dmu_tx_commit(tx);
}
static void
brt_table_alloc(brt_t *brt)
{
for (int i = 0; i < TXG_SIZE; i++) {
avl_create(&brt->brt_pending_tree[i],
brt_pending_entry_compare,
sizeof (brt_pending_entry_t),
offsetof(brt_pending_entry_t, bpe_node));
mutex_init(&brt->brt_pending_lock[i], NULL, MUTEX_DEFAULT,
NULL);
}
}
static void
brt_table_free(brt_t *brt)
{
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(avl_is_empty(&brt->brt_pending_tree[i]));
avl_destroy(&brt->brt_pending_tree[i]);
mutex_destroy(&brt->brt_pending_lock[i]);
}
}
static void
brt_alloc(spa_t *spa)
{
brt_t *brt;
ASSERT(spa->spa_brt == NULL);
brt = kmem_zalloc(sizeof (*brt), KM_SLEEP);
rw_init(&brt->brt_lock, NULL, RW_DEFAULT, NULL);
brt->brt_spa = spa;
brt->brt_rangesize = 0;
brt->brt_nentries = 0;
brt->brt_vdevs = NULL;
brt->brt_nvdevs = 0;
brt_table_alloc(brt);
spa->spa_brt = brt;
}
void
brt_create(spa_t *spa)
{
brt_alloc(spa);
brt_vdevs_alloc(spa->spa_brt, B_FALSE);
}
int
brt_load(spa_t *spa)
{
brt_alloc(spa);
brt_vdevs_alloc(spa->spa_brt, B_TRUE);
return (0);
}
void
brt_unload(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return;
brt_vdevs_free(brt);
brt_table_free(brt);
rw_destroy(&brt->brt_lock);
kmem_free(brt, sizeof (*brt));
spa->spa_brt = NULL;
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_brt, zfs_brt_, prefetch, INT, ZMOD_RW,
"Enable prefetching of BRT entries");
#ifdef ZFS_BRT_DEBUG
ZFS_MODULE_PARAM(zfs_brt, zfs_brt_, debug, INT, ZMOD_RW, "BRT debug");
#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 1ea075217fb1..f2831a0e8abf 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -1,5184 +1,5193 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>
static kstat_t *dbuf_ksp;
typedef struct dbuf_stats {
/*
* Various statistics about the size of the dbuf cache.
*/
kstat_named_t cache_count;
kstat_named_t cache_size_bytes;
kstat_named_t cache_size_bytes_max;
/*
* Statistics regarding the bounds on the dbuf cache size.
*/
kstat_named_t cache_target_bytes;
kstat_named_t cache_lowater_bytes;
kstat_named_t cache_hiwater_bytes;
/*
* Total number of dbuf cache evictions that have occurred.
*/
kstat_named_t cache_total_evicts;
/*
* The distribution of dbuf levels in the dbuf cache and
* the total size of all dbufs at each level.
*/
kstat_named_t cache_levels[DN_MAX_LEVELS];
kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
/*
* Statistics about the dbuf hash table.
*/
kstat_named_t hash_hits;
kstat_named_t hash_misses;
kstat_named_t hash_collisions;
kstat_named_t hash_elements;
kstat_named_t hash_elements_max;
/*
* Number of sublists containing more than one dbuf in the dbuf
* hash table. Keep track of the longest hash chain.
*/
kstat_named_t hash_chains;
kstat_named_t hash_chain_max;
/*
* Number of times a dbuf_create() discovers that a dbuf was
* already created and in the dbuf hash table.
*/
kstat_named_t hash_insert_race;
/*
* Number of entries in the hash table dbuf and mutex arrays.
*/
kstat_named_t hash_table_count;
kstat_named_t hash_mutex_count;
/*
* Statistics about the size of the metadata dbuf cache.
*/
kstat_named_t metadata_cache_count;
kstat_named_t metadata_cache_size_bytes;
kstat_named_t metadata_cache_size_bytes_max;
/*
* For diagnostic purposes, this is incremented whenever we can't add
* something to the metadata cache because it's full, and instead put
* the data in the regular dbuf cache.
*/
kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;
dbuf_stats_t dbuf_stats = {
{ "cache_count", KSTAT_DATA_UINT64 },
{ "cache_size_bytes", KSTAT_DATA_UINT64 },
{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "cache_target_bytes", KSTAT_DATA_UINT64 },
{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
{ "cache_total_evicts", KSTAT_DATA_UINT64 },
{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
{ "hash_hits", KSTAT_DATA_UINT64 },
{ "hash_misses", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "hash_insert_race", KSTAT_DATA_UINT64 },
{ "hash_table_count", KSTAT_DATA_UINT64 },
{ "hash_mutex_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t cache_count;
wmsum_t cache_total_evicts;
wmsum_t cache_levels[DN_MAX_LEVELS];
wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
wmsum_t hash_hits;
wmsum_t hash_misses;
wmsum_t hash_collisions;
wmsum_t hash_chains;
wmsum_t hash_insert_race;
wmsum_t metadata_cache_count;
wmsum_t metadata_cache_overflow;
} dbuf_sums;
#define DBUF_STAT_INCR(stat, val) \
wmsum_add(&dbuf_sums.stat, val);
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val));
#define DBUF_STAT_BUMP(stat) \
DBUF_STAT_INCR(stat, 1);
#define DBUF_STAT_BUMPDOWN(stat) \
DBUF_STAT_INCR(stat, -1);
#define DBUF_STAT_MAX(stat, v) { \
uint64_t _m; \
while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
(_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
continue; \
}
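/*
* Illustrative note (not part of the build): DBUF_STAT_MAX maintains a
* lock-free monotonic maximum. A call such as
*
*	DBUF_STAT_MAX(hash_chain_max, i);
*
* retries the compare-and-swap only while the observed value _m is still
* below i and another update raced in; as soon as any thread has published
* a value >= i, the loop exits without writing.
*/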
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
/*
* Global data structures and functions for the dbuf cache.
*/
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;
static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;
/*
* There are two dbuf caches; each dbuf can only be in one of them at a time.
*
* 1. Cache of metadata dbufs, to help make read-heavy administrative commands
* from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
* that represent the metadata that describes filesystems/snapshots/
* bookmarks/properties/etc. We only evict from this cache when we export a
* pool, to short-circuit as much I/O as possible for all administrative
* commands that need the metadata. There is no eviction policy for this
* cache, because we try to only include types in it which would occupy a
* very small amount of space per object but create a large impact on the
* performance of these commands. Instead, after it reaches a maximum size
* (which should only happen on very small memory systems with a very large
* number of filesystem objects), we stop taking new dbufs into the
* metadata cache, instead putting them in the normal dbuf cache.
*
* 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
* are not currently held but have been recently released. These dbufs
* are not eligible for arc eviction until they are aged out of the cache.
* Dbufs that are aged out of the cache will be immediately destroyed and
* become eligible for arc eviction.
*
* Dbufs are added to these caches once the last hold is released. If a dbuf is
* later accessed and still exists in the dbuf cache, then it will be removed
* from the cache and later re-added to the head of the cache.
*
* If a given dbuf meets the requirements for the metadata cache, it will go
* there, otherwise it will be considered for the generic LRU dbuf cache. The
* caches and the refcounts tracking their sizes are stored in an array indexed
* by those caches' matching enum values (from dbuf_cached_state_t).
*/
typedef struct dbuf_cache {
multilist_t cache;
zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
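/*
* The ____cacheline_aligned annotation above puts the size refcount on its
* own cache line, presumably so that the frequently updated counter does
* not false-share a line with the multilist headers that the eviction code
* walks.
*/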
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;
/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;
static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);
/*
* The LRU dbuf cache uses a three-stage eviction policy:
* - A low water marker designates when the dbuf eviction thread
* should stop evicting from the dbuf cache.
* - When we reach the maximum size (aka mid water mark), we
* signal the eviction thread to run.
* - The high water mark indicates when the eviction thread
* is unable to keep up with the incoming load and eviction must
* happen in the context of the calling thread.
*
* The dbuf cache:
* (max size)
* low water mid water hi water
* +----------------------------------------+----------+----------+
* | | | |
* | | | |
* | | | |
* | | | |
* +----------------------------------------+----------+----------+
* stop signal evict
* evicting eviction directly
* thread
*
* The high and low water marks indicate the operating range for the eviction
* thread. The low water mark is, by default, 90% of the total size of the
* cache and the high water mark is at 110% (both of these percentages can be
* changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
* respectively). The eviction thread will try to ensure that the cache remains
* within this range by waking up every second and checking if the cache is
* above the low water mark. The thread can also be woken up by callers adding
* elements into the cache if the cache is larger than the mid water (i.e. max
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
* water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
/*
* The percentage above and below the maximum cache size.
*/
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;
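/*
* Worked example (illustrative, assuming a 1 GiB ARC target and the
* default tunables above): dbuf_cache_target_bytes() yields
* 1 GiB >> dbuf_cache_shift (5) = 32 MiB, so the eviction thread operates
* between a low water mark of 32 MiB - 10% = ~28.8 MiB and a high water
* mark of 32 MiB + 10% = ~35.2 MiB.
*/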
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
memset(db, 0, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
zfs_refcount_create(&db->db_holds);
return (0);
}
static void
dbuf_dest(void *vdb, void *unused)
{
(void) unused;
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
rw_destroy(&db->db_rwlock);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
zfs_refcount_destroy(&db->db_holds);
}
/*
* dbuf hash table routines
*/
static dbuf_hash_table_t dbuf_hash_table;
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
#define DTRACE_SET_STATE(db, why) \
DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
const char *, why)
#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
((dbuf)->db.db_object == (obj) && \
(dbuf)->db_objset == (os) && \
(dbuf)->db_level == (level) && \
(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
uint64_t *hash_out)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv;
uint64_t idx;
dmu_buf_impl_t *db;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (db);
}
mutex_exit(&db->db_mtx);
}
}
mutex_exit(DBUF_HASH_MUTEX(h, idx));
if (hash_out != NULL)
*hash_out = hv;
return (NULL);
}
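/*
* Usage sketch (hypothetical caller, for illustration only): on a hit the
* dbuf is returned with db_mtx held; on a miss the hash that was computed
* is returned via hash_out so it need not be recomputed:
*
*	uint64_t hash;
*	dmu_buf_impl_t *db = dbuf_find(os, object, 0, blkid, &hash);
*	if (db != NULL) {
*		... use db ...
*		mutex_exit(&db->db_mtx);
*	}
*/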
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
dnode_t *dn;
dmu_buf_impl_t *db = NULL;
if (dnode_hold(os, object, FTAG, &dn) == 0) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus != NULL) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
return (db);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to the one being inserted, the existing element is returned
* (with its db_mtx held) and the new element is not inserted.
* Otherwise returns NULL.
*/
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
uint64_t blkid, idx;
dmu_buf_impl_t *dbf;
uint32_t i;
blkid = db->db_blkid;
ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
idx = db->db_hash & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
dbf = dbf->db_hash_next, i++) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
mutex_enter(&dbf->db_mtx);
if (dbf->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (dbf);
}
mutex_exit(&dbf->db_mtx);
}
}
if (i > 0) {
DBUF_STAT_BUMP(hash_collisions);
if (i == 1)
DBUF_STAT_BUMP(hash_chains);
DBUF_STAT_MAX(hash_chain_max, i);
}
mutex_enter(&db->db_mtx);
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
DBUF_STAT_MAX(hash_elements_max, he);
return (NULL);
}
/*
* This returns whether this dbuf should be stored in the metadata cache, which
* is based on whether it's from one of the dnode types that store data related
* to traversing dataset hierarchies.
*/
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
DB_DNODE_ENTER(db);
dmu_object_type_t type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
/* Check if this dbuf is one of the types we care about */
if (DMU_OT_IS_METADATA_CACHED(type)) {
/* If we hit this, then we set something up wrong in dmu_ot */
ASSERT(DMU_OT_IS_METADATA(type));
/*
* Sanity check for small-memory systems: don't allocate too
* much memory for this purpose.
*/
if (zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
dbuf_metadata_cache_target_bytes()) {
DBUF_STAT_BUMP(metadata_cache_overflow);
return (B_FALSE);
}
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Remove an entry from the hash table. It must be in the EVICTING state.
*/
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t idx;
dmu_buf_impl_t *dbf, **dbp;
ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
db->db_blkid), ==, db->db_hash);
idx = db->db_hash & h->hash_table_mask;
/*
* We mustn't hold db_mtx, in order to maintain lock ordering:
* DBUF_HASH_MUTEX must be acquired before db_mtx.
*/
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_state == DB_EVICTING);
ASSERT(!MUTEX_HELD(&db->db_mtx));
mutex_enter(DBUF_HASH_MUTEX(h, idx));
dbp = &h->hash_table[idx];
while ((dbf = *dbp) != db) {
dbp = &dbf->db_hash_next;
ASSERT(dbf != NULL);
}
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
if (h->hash_table[idx] &&
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
mutex_exit(DBUF_HASH_MUTEX(h, idx));
atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}
typedef enum {
DBVU_EVICTING,
DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
int64_t holds;
if (db->db_user == NULL)
return;
/* Only data blocks support the attachment of user data. */
ASSERT(db->db_level == 0);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
ASSERT3U(db->db_state, ==, DB_CACHED);
holds = zfs_refcount_count(&db->db_holds);
if (verify_type == DBVU_EVICTING) {
/*
* Immediate eviction occurs when holds == dirtycnt.
* For normal eviction buffers, holds is zero on
* eviction, except when dbuf_fix_old_data() calls
* dbuf_clear_data(). However, the hold count can grow
* during eviction even though db_mtx is held (see
* dmu_bonus_hold() for an example), so we can only
* test the generic invariant that holds >= dirtycnt.
*/
ASSERT3U(holds, >=, db->db_dirtycnt);
} else {
if (db->db_user_immediate_evict == TRUE)
ASSERT3U(holds, >=, db->db_dirtycnt);
else
ASSERT3U(holds, >, 0);
}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (dbu == NULL)
return;
dbuf_verify_user(db, DBVU_EVICTING);
db->db_user = NULL;
#ifdef ZFS_DEBUG
if (dbu->dbu_clear_on_evict_dbufp != NULL)
*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif
/*
* There are two eviction callbacks - one that we call synchronously
* and one that we invoke via a taskq. The async one is useful for
* avoiding lock order reversals and limiting stack depth.
*
* Note that if we have a sync callback but no async callback,
* it's likely that the sync callback will free the structure
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
if (has_async) {
taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
dbu, 0, &dbu->dbu_tqent);
}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
/*
* Consider indirect blocks and spill blocks to be metadata.
*/
if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
return (B_TRUE);
} else {
boolean_t is_metadata;
DB_DNODE_ENTER(db);
is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
DB_DNODE_EXIT(db);
return (is_metadata);
}
}
/*
* We want to exclude buffers that are on a special allocation class from
* L2ARC.
*/
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
(db->db_objset->os_secondary_cache ==
ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
blkptr_t *bp = db->db_blkptr;
if (bp == NULL || BP_IS_HOLE(bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
(dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
(level > 0 ||
DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
if (bp == NULL || BP_IS_HOLE(bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the dbuf eviction
* code is laid out; dbuf_evict_thread() assumes dbufs are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;
/*
* The assumption here is that the hash value for a given
* dmu_buf_impl_t will remain constant throughout its lifetime
* (i.e. its objset, object, level and blkid fields don't change).
* Thus, we don't need to store the dbuf's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64-bit
* division would be a waste of time, so we limit it to 32 bits.
*/
return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid) %
multilist_get_num_sublists(ml));
}
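/*
* For example (illustrative): with 64 sublists, a dbuf whose truncated
* 32-bit hash is 0x12345678 always maps to sublist 0x12345678 % 64 == 56,
* both when it is inserted and when it is removed.
*/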
/*
* The target size of the dbuf cache can grow with the ARC target,
* unless limited by the tunable dbuf_cache_max_bytes.
*/
static inline unsigned long
dbuf_cache_target_bytes(void)
{
return (MIN(dbuf_cache_max_bytes,
arc_target_bytes() >> dbuf_cache_shift));
}
/*
* The target size of the dbuf metadata cache can grow with the ARC target,
* unless limited by the tunable dbuf_metadata_cache_max_bytes.
*/
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
return (MIN(dbuf_metadata_cache_max_bytes,
arc_target_bytes() >> dbuf_metadata_cache_shift));
}
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target +
(dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}
static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target -
(dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
static inline boolean_t
dbuf_cache_above_lowater(void)
{
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
dbuf_cache_lowater_bytes());
}
/*
* Evict the oldest eligible dbuf from the dbuf cache.
*/
static void
dbuf_evict_one(void)
{
int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
multilist_sublist_t *mls = multilist_sublist_lock(
&dbuf_caches[DB_DBUF_CACHE].cache, idx);
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
dmu_buf_impl_t *db = multilist_sublist_tail(mls);
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
}
DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
multilist_sublist_t *, mls);
if (db != NULL) {
multilist_sublist_remove(mls, db);
multilist_sublist_unlock(mls);
(void) zfs_refcount_remove_many(
&dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
db->db_caching_status = DB_NO_CACHE;
dbuf_destroy(db);
DBUF_STAT_BUMP(cache_total_evicts);
} else {
multilist_sublist_unlock(mls);
}
}
/*
* The dbuf evict thread is responsible for aging out dbufs from the
* cache. Once the cache has reached its maximum size, dbufs are removed
* and destroyed. The eviction thread will continue running until the size
* of the dbuf cache is at or below the low water mark. Once a dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
mutex_enter(&dbuf_evict_lock);
while (!dbuf_evict_thread_exit) {
while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
&dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
/*
* Keep evicting as long as we're above the low water mark
* for the cache. We do this without holding the locks to
* minimize lock contention.
*/
while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
dbuf_evict_one();
}
mutex_enter(&dbuf_evict_lock);
}
dbuf_evict_thread_exit = B_FALSE;
cv_broadcast(&dbuf_evict_cv);
CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
thread_exit();
}
/*
* Wake up the dbuf eviction thread if the dbuf cache is at its max size.
* If the dbuf cache is at its high water mark, then evict a dbuf from the
* dbuf cache using the caller's context.
*/
static void
dbuf_evict_notify(uint64_t size)
{
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
* and grabbing the lock results in massive lock contention.
*/
if (size > dbuf_cache_target_bytes()) {
if (size > dbuf_cache_hiwater_bytes())
dbuf_evict_one();
cv_signal(&dbuf_evict_cv);
}
}
static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
dbuf_hash_table_t *h = &dbuf_hash_table;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
ds->cache_count.value.ui64 =
wmsum_value(&dbuf_sums.cache_count);
ds->cache_size_bytes.value.ui64 =
zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
ds->cache_total_evicts.value.ui64 =
wmsum_value(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
ds->cache_levels[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels[i]);
ds->cache_levels_bytes[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
}
ds->hash_hits.value.ui64 =
wmsum_value(&dbuf_sums.hash_hits);
ds->hash_misses.value.ui64 =
wmsum_value(&dbuf_sums.hash_misses);
ds->hash_collisions.value.ui64 =
wmsum_value(&dbuf_sums.hash_collisions);
ds->hash_chains.value.ui64 =
wmsum_value(&dbuf_sums.hash_chains);
ds->hash_insert_race.value.ui64 =
wmsum_value(&dbuf_sums.hash_insert_race);
ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
ds->metadata_cache_count.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_count);
ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
ds->metadata_cache_overflow.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_overflow);
return (0);
}
void
dbuf_init(void)
{
uint64_t hmsize, hsize = 1ULL << 16;
dbuf_hash_table_t *h = &dbuf_hash_table;
/*
* The hash table is big enough to fill one eighth of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem / 8 / 8K * sizeof(void*) bytes (128KB per GB with 8-byte
* pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
hsize <<= 1;
h->hash_table = NULL;
while (h->hash_table == NULL) {
h->hash_table_mask = hsize - 1;
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
if (h->hash_table == NULL)
hsize >>= 1;
ASSERT3U(hsize, >=, 1ULL << 10);
}
/*
* The hash table buckets are protected by an array of mutexes where
* each mutex is responsible for protecting 128 buckets. A minimum
* array size of 8192 is targeted to avoid contention.
*/
if (dbuf_mutex_cache_shift == 0)
hmsize = MAX(hsize >> 7, 1ULL << 13);
else
hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
h->hash_mutexes = NULL;
while (h->hash_mutexes == NULL) {
h->hash_mutex_mask = hmsize - 1;
h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
KM_SLEEP);
if (h->hash_mutexes == NULL)
hmsize >>= 1;
}
dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
sizeof (dmu_buf_impl_t),
0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
for (int i = 0; i < hmsize; i++)
mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
dbuf_stats_init(h);
/*
* All entries are queued via taskq_dispatch_ent(), so min/maxalloc
* configuration is not required.
*/
dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
multilist_create(&dbuf_caches[dcs].cache,
sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_cache_link),
dbuf_cache_multilist_index_func);
zfs_refcount_create(&dbuf_caches[dcs].size);
}
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
wmsum_init(&dbuf_sums.cache_count, 0);
wmsum_init(&dbuf_sums.cache_total_evicts, 0);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_init(&dbuf_sums.cache_levels[i], 0);
wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
}
wmsum_init(&dbuf_sums.hash_hits, 0);
wmsum_init(&dbuf_sums.hash_misses, 0);
wmsum_init(&dbuf_sums.hash_collisions, 0);
wmsum_init(&dbuf_sums.hash_chains, 0);
wmsum_init(&dbuf_sums.hash_insert_race, 0);
wmsum_init(&dbuf_sums.metadata_cache_count, 0);
wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dbuf_ksp != NULL) {
for (int i = 0; i < DN_MAX_LEVELS; i++) {
snprintf(dbuf_stats.cache_levels[i].name,
KSTAT_STRLEN, "cache_level_%d", i);
dbuf_stats.cache_levels[i].data_type =
KSTAT_DATA_UINT64;
snprintf(dbuf_stats.cache_levels_bytes[i].name,
KSTAT_STRLEN, "cache_level_%d_bytes", i);
dbuf_stats.cache_levels_bytes[i].data_type =
KSTAT_DATA_UINT64;
}
dbuf_ksp->ks_data = &dbuf_stats;
dbuf_ksp->ks_update = dbuf_kstat_update;
kstat_install(dbuf_ksp);
}
}
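/*
* Worked sizing example (illustrative): on a system where arc_all_memory()
* returns 32 GiB, the table-sizing loop above stops at hsize = 2^19
* buckets, since 2^19 * 8K = 4 GiB = 32 GiB / 8. The bucket array then
* occupies 2^19 * sizeof (void *) = 4 MiB, and with the default
* dbuf_mutex_cache_shift of 0 the mutex array gets
* MAX(2^19 >> 7, 2^13) = 8192 entries.
*/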
void
dbuf_fini(void)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
dbuf_stats_destroy();
for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
mutex_destroy(&h->hash_mutexes[i]);
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
sizeof (kmutex_t));
kmem_cache_destroy(dbuf_kmem_cache);
taskq_destroy(dbu_evict_taskq);
mutex_enter(&dbuf_evict_lock);
dbuf_evict_thread_exit = B_TRUE;
while (dbuf_evict_thread_exit) {
cv_signal(&dbuf_evict_cv);
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
zfs_refcount_destroy(&dbuf_caches[dcs].size);
multilist_destroy(&dbuf_caches[dcs].cache);
}
if (dbuf_ksp != NULL) {
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
wmsum_fini(&dbuf_sums.cache_count);
wmsum_fini(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_fini(&dbuf_sums.cache_levels[i]);
wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
}
wmsum_fini(&dbuf_sums.hash_hits);
wmsum_fini(&dbuf_sums.hash_misses);
wmsum_fini(&dbuf_sums.hash_collisions);
wmsum_fini(&dbuf_sums.hash_chains);
wmsum_fini(&dbuf_sums.hash_insert_race);
wmsum_fini(&dbuf_sums.metadata_cache_count);
wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}
/*
* Other stuff.
*/
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
dnode_t *dn;
dbuf_dirty_record_t *dr;
uint32_t txg_prev;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
return;
ASSERT(db->db_objset != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
ASSERT(db->db_parent == NULL);
ASSERT(db->db_blkptr == NULL);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
ASSERT3U(db->db_level, <, dn->dn_nlevels);
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID ||
!avl_is_empty(&dn->dn_dbufs));
}
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dn != NULL);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
} else if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn != NULL);
ASSERT0(db->db.db_offset);
} else {
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
}
if ((dr = list_head(&db->db_dirty_records)) != NULL) {
ASSERT(dr->dr_dbuf == db);
txg_prev = dr->dr_txg;
for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
dr = list_next(&db->db_dirty_records, dr)) {
ASSERT(dr->dr_dbuf == db);
ASSERT(txg_prev > dr->dr_txg);
txg_prev = dr->dr_txg;
}
}
/*
* We can't assert that db_size matches dn_datablksz because it
* can be momentarily different when another thread is doing
* dnode_set_blksz().
*/
if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
dr = db->db_data_pending;
/*
* It should only be modified in syncing context, so
* make sure we only have one copy of the data.
*/
ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
}
/* verify db->db_blkptr */
if (db->db_blkptr) {
if (db->db_parent == dn->dn_dbuf) {
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
ASSERT(db->db_parent == NULL);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
} else {
/* db is pointed to by an indirect block */
int epb __maybe_unused = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
/*
* dnode_grow_indblksz() can make this fail if we don't
* have the parent's rwlock. XXX indblksz no longer
* grows. safe to do this now?
*/
if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
ASSERT3P(db->db_blkptr, ==,
((blkptr_t *)db->db_parent->db.db_data +
db->db_blkid % epb));
}
}
}
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
(db->db_buf == NULL || db->db_buf->b_data) &&
db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
/*
* If the blkptr isn't set but the buffer has nonzero data,
* it had better be dirty, otherwise we'll lose that
* data when we evict this buffer.
*
* There is an exception to this rule for indirect blocks; in
* this case, if the indirect block is a hole, we fill in a few
* fields on each of the child blocks (importantly, birth time)
* to prevent hole birth times from being lost when you
* partially fill in a hole.
*/
if (db->db_dirtycnt == 0) {
if (db->db_level == 0) {
uint64_t *buf = db->db.db_data;
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
ASSERT(buf[i] == 0);
}
} else {
blkptr_t *bps = db->db.db_data;
ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
db->db.db_size);
/*
* We want to verify that all the blkptrs in the
* indirect block are holes, but we may have
* automatically set up a few fields for them.
* We iterate through each blkptr and verify
* they only have those fields set.
*/
for (int i = 0;
i < db->db.db_size / sizeof (blkptr_t);
i++) {
blkptr_t *bp = &bps[i];
ASSERT(ZIO_CHECKSUM_IS_ZERO(
&bp->blk_cksum));
ASSERT(
DVA_IS_EMPTY(&bp->blk_dva[0]) &&
DVA_IS_EMPTY(&bp->blk_dva[1]) &&
DVA_IS_EMPTY(&bp->blk_dva[2]));
ASSERT0(bp->blk_fill);
ASSERT0(bp->blk_pad[0]);
ASSERT0(bp->blk_pad[1]);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(BP_IS_HOLE(bp));
ASSERT0(bp->blk_phys_birth);
}
}
}
}
DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
ASSERT3P(db->db_buf, ==, NULL);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "clear data");
}
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(buf != NULL);
db->db_buf = buf;
ASSERT(buf->b_data != NULL);
db->db.db_data = buf->b_data;
}
static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
spa_t *spa = db->db_objset->os_spa;
return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
/*
* Loan out an arc_buf for read. Return the loaned arc_buf.
*/
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
arc_buf_t *abuf;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
int blksz = db->db.db_size;
spa_t *spa = db->db_objset->os_spa;
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
memcpy(abuf->b_data, db->db.db_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
mutex_exit(&db->db_mtx);
}
return (abuf);
}
/*
* Calculate which level n block references the data at the level 0 offset
* provided.
*/
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
/*
* The level n blkid is equal to the level 0 blkid divided by
* the number of level 0s in a level n block.
*
* The level 0 blkid is offset >> datablkshift =
* offset / 2^datablkshift.
*
* The number of level 0s in a level n is the number of block
* pointers in an indirect block, raised to the power of level.
* This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
* 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
*
* Thus, the level n blkid is: offset /
* ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
* = offset / 2^(datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
* = offset >> (datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
*/
const unsigned exp = dn->dn_datablkshift +
level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
if (exp >= 8 * sizeof (offset)) {
/* This only happens on the highest indirection level */
ASSERT3U(level, ==, dn->dn_nlevels - 1);
return (0);
}
ASSERT3U(exp, <, 8 * sizeof (offset));
return (offset >> exp);
} else {
ASSERT3U(offset, <, dn->dn_datablksz);
return (0);
}
}
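/*
* Worked example (illustrative): for a dnode with 128K data blocks
* (datablkshift = 17) and 128K indirect blocks (indblkshift = 17, i.e.
* 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers per indirect block),
* the level 1 block covering byte offset 300 MiB is
* 314572800 >> (17 + 1 * (17 - 7)) = 314572800 >> 27 = blkid 2.
*/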
/*
* This function is used to lock the parent of the provided dbuf. This should be
* used when modifying or reading db_blkptr.
*/
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
enum db_lock_type ret = DLT_NONE;
if (db->db_parent != NULL) {
rw_enter(&db->db_parent->db_rwlock, rw);
ret = DLT_PARENT;
} else if (dmu_objset_ds(db->db_objset) != NULL) {
rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
tag);
ret = DLT_OBJSET;
}
/*
* We only return a DLT_NONE lock when it's the top-most indirect block
* of the meta-dnode of the MOS.
*/
return (ret);
}
/*
* We need to pass the lock type in because it's possible that the block will
* move from being the topmost indirect block in a dnode (and thus, have no
* parent) to not the top-most via an indirection increase. This would cause a
* panic if we didn't pass the lock type in.
*/
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
if (type == DLT_PARENT)
rw_exit(&db->db_parent->db_rwlock);
else if (type == DLT_OBJSET)
rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
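/*
* Usage sketch (hypothetical, for illustration): callers bracket accesses
* to db_blkptr with the pair above, handing the returned lock type back so
* the correct lock is dropped even if the dbuf's position changed:
*
*	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
*	blkptr_t bp;
*	if (db->db_blkptr != NULL)
*		bp = *db->db_blkptr;
*	dmu_buf_unlock_parent(db, dblt, FTAG);
*/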
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
(void) zb, (void) bp;
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
ASSERT3U(db->db_state, ==, DB_READ);
/*
* All reads are synchronous, so we must have a hold on the dbuf.
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT3P(db->db_buf, ==, NULL);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
memset(buf->b_data, 0, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "freed in flight");
} else {
/* success */
ASSERT(zio == NULL || zio->io_error == 0);
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "successful read");
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL, B_FALSE);
}
/*
* Shortcut for performing reads on bonus dbufs. Returns
* an error if we fail to verify the dnode associated with
* a decrypted block. Otherwise returns 0.
*/
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
int bonuslen, max_bonuslen, err;
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err)
return (err);
bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(DB_DNODE_HELD(db));
ASSERT3U(bonuslen, <=, db->db.db_size);
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
memset(db->db.db_data, 0, max_bonuslen);
if (bonuslen)
memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
}
static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
{
blkptr_t *bps = db->db.db_data;
uint32_t indbs = 1ULL << dn->dn_indblkshift;
int n_bps = indbs >> SPA_BLKPTRSHIFT;
for (int i = 0; i < n_bps; i++) {
blkptr_t *bp = &bps[i];
ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
dn->dn_datablksz : BP_GET_LSIZE(dbbp));
BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
}
}
/*
* Handle reads on dbufs that are holes, if necessary. This function
* requires that the dbuf's mutex is held. Returns success (0) if action
* was taken, ENOENT if no action was taken.
*/
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
int is_hole = bp == NULL || BP_IS_HOLE(bp);
/*
* For level 0 blocks only, if the above check fails:
* Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
* processes the delete record and clears the bp while we are waiting
* for the dn_mtx (resulting in a "no" from block_freed).
*/
if (!is_hole && db->db_level == 0)
is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
memset(db->db.db_data, 0, db->db.db_size);
if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
bp->blk_birth != 0) {
dbuf_handle_indirect_hole(db, dn, bp);
}
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "hole read satisfied");
return (0);
}
return (ENOENT);
}
/*
* This function ensures that, when doing a decrypting read of a block,
* we make sure we have decrypted the dnode associated with it. We must do
* this so that we ensure we are fully authenticating the checksum-of-MACs
* tree from the root of the objset down to this block. Indirect blocks are
* always verified against their secure checksum-of-MACs assuming that the
* dnode containing them is correct. Now that we are doing a decrypting read,
* we can be sure that the key is loaded and verify that assumption. This is
* especially important considering that we always read encrypted dnode
* blocks as raw data (without verifying their MACs) to start, and
* decrypt / authenticate them when we need to read an encrypted bonus buffer.
*/
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
int err = 0;
objset_t *os = db->db_objset;
arc_buf_t *dnode_abuf;
dnode_t *dn;
zbookmark_phys_t zb;
ASSERT(MUTEX_HELD(&db->db_mtx));
if ((flags & DB_RF_NO_DECRYPT) != 0 ||
!os->os_encrypted || os->os_raw_receive)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
DB_DNODE_EXIT(db);
return (0);
}
SET_BOOKMARK(&zb, dmu_objset_id(os),
DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
/*
* An error code of EACCES tells us that the key is still not
* available. This is ok if we are only reading authenticated
* (and therefore non-encrypted) blocks.
*/
if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
(db->db_blkid == DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
err = 0;
DB_DNODE_EXIT(db);
return (err);
}
/*
* Drops db_mtx and the parent lock specified by dblt and tag before
* returning.
*/
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
db_lock_type_t dblt, const void *tag)
{
dnode_t *dn;
zbookmark_phys_t zb;
uint32_t aflags = ARC_FLAG_NOWAIT;
int err, zio_flags;
blkptr_t bp, *bpp;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_buf == NULL);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
if (db->db_blkid == DMU_BONUS_BLKID) {
err = dbuf_read_bonus(db, dn, flags);
goto early_unlock;
}
if (db->db_state == DB_UNCACHED) {
if (db->db_blkptr == NULL) {
bpp = NULL;
} else {
bp = *db->db_blkptr;
bpp = &bp;
}
} else {
dbuf_dirty_record_t *dr;
ASSERT3S(db->db_state, ==, DB_NOFILL);
/*
* Block cloning: If we have a pending block clone,
* we don't want to read the underlying block, but the content
* of the block being cloned, so we have the most recent data.
*/
dr = list_head(&db->db_dirty_records);
if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
err = EIO;
goto early_unlock;
}
bp = dr->dt.dl.dr_overridden_by;
bpp = &bp;
}
err = dbuf_read_hole(db, dn, bpp);
if (err == 0)
goto early_unlock;
ASSERT(bpp != NULL);
/*
* Any attempt to read a redacted block should result in an error. This
* will never happen under normal conditions, but can be useful for
* debugging purposes.
*/
if (BP_IS_REDACTED(bpp)) {
ASSERT(dsl_dataset_feature_is_active(
db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
err = SET_ERROR(EIO);
goto early_unlock;
}
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(db->db_objset));
err = SET_ERROR(EIO);
goto early_unlock;
}
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err != 0)
goto early_unlock;
DB_DNODE_EXIT(db);
db->db_state = DB_READ;
DTRACE_SET_STATE(db, "read issued");
mutex_exit(&db->db_mtx);
if (!DBUF_IS_CACHEABLE(db))
aflags |= ARC_FLAG_UNCACHED;
else if (dbuf_is_l2cacheable(db))
aflags |= ARC_FLAG_L2CACHE;
dbuf_add_ref(db, NULL);
zio_flags = (flags & DB_RF_CANFAIL) ?
ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
zio_flags |= ZIO_FLAG_RAW;
/*
* The zio layer will copy the provided blkptr later, but we have our
* own copy so that we can release the parent's rwlock. We have to
* do that so that if dbuf_read_done is called synchronously (on
* an l1 cache hit) we don't acquire the db_mtx while holding the
* parent's rwlock, which would be a lock ordering violation.
*/
dmu_buf_unlock_parent(db, dblt, tag);
(void) arc_read(zio, db->db_objset->os_spa, bpp,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
&aflags, &zb);
return (err);
early_unlock:
DB_DNODE_EXIT(db);
mutex_exit(&db->db_mtx);
dmu_buf_unlock_parent(db, dblt, tag);
return (err);
}
/*
* This is our just-in-time copy function. It makes a copy of buffers that
* have been modified in a previous transaction group before we access them in
* the current active group.
*
* This function is used in three places: when we are dirtying a buffer for the
* first time in a txg, when we are freeing a range in a dnode that includes
* this buffer, and when we are accessing a buffer which was received compressed
* and later referenced in a WRITE_BYREF record.
*
* Note that when we are called from dbuf_free_range() we do not put a hold on
* the buffer, we just traverse the active dbuf list for the dnode.
*/
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
ASSERT(db->db_level == 0);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
(dr->dt.dl.dr_data !=
((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
return;
/*
* If the last dirty record for this dbuf has not yet synced
* and it's referencing the dbuf data, either:
* reset the reference to point to a new copy,
* or (if there are no active holders)
* just null out the current db_data pointer.
*/
ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
enum zio_compress compress_type =
arc_get_compression(db->db_buf);
uint8_t complevel = arc_get_complevel(db->db_buf);
if (arc_is_encrypted(db->db_buf)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(db->db_buf, &byteorder, salt,
iv, mac);
dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
compress_type, complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
size, arc_buf_lsize(db->db_buf), compress_type,
complevel);
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
}
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
int err = 0;
boolean_t prefetch;
dnode_t *dn;
/*
* We don't have to hold the mutex to check db_state because the
* dbuf can't be freed while we have a hold on it.
*/
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
(flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
mutex_enter(&db->db_mtx);
if (flags & DB_RF_PARTIAL_FIRST)
db->db_partial_read = B_TRUE;
else if (!(flags & DB_RF_PARTIAL_MORE))
db->db_partial_read = B_FALSE;
if (db->db_state == DB_CACHED) {
/*
* Ensure that this block's dnode has been decrypted if
* the caller has requested decrypted data.
*/
err = dbuf_read_verify_dnode_crypt(db, flags);
/*
* If the arc buf is compressed or encrypted and the caller
* requested uncompressed data, we need to untransform it
* before returning. We also call arc_untransform() on any
* unauthenticated blocks, which will verify their MAC if
* the key is now available.
*/
if (err == 0 && db->db_buf != NULL &&
(flags & DB_RF_NO_DECRYPT) == 0 &&
(arc_is_encrypted(db->db_buf) ||
arc_is_unauthenticated(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
spa_t *spa = dn->dn_objset->os_spa;
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
mutex_exit(&db->db_mtx);
if (err == 0 && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_FALSE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_hits);
} else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
boolean_t need_wait = B_FALSE;
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
if (zio == NULL && (db->db_state == DB_NOFILL ||
(db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
spa_t *spa = dn->dn_objset->os_spa;
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
need_wait = B_TRUE;
}
err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
/*
* dbuf_read_impl has dropped db_mtx and our parent's rwlock
* for us
*/
if (!err && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
db->db_state != DB_CACHED,
flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/*
* If we created a zio_root we must execute it to avoid
* leaking it, even if it isn't attached to any work due
* to an error in dbuf_read_impl().
*/
if (need_wait) {
if (err == 0)
err = zio_wait(zio);
else
VERIFY0(zio_wait(zio));
}
} else {
/*
* Another reader came in while the dbuf was in flight
* between UNCACHED and CACHED. Either a writer will finish
* writing the buffer (sending the dbuf to CACHED) or the
* first reader's request will reach the read_done callback
* and send the dbuf to CACHED. Otherwise, a failure
* occurred and the dbuf went to UNCACHED.
*/
mutex_exit(&db->db_mtx);
if (prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_TRUE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/* Skip the wait per the caller's request. */
if ((flags & DB_RF_NEVERWAIT) == 0) {
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL) {
ASSERT(db->db_state == DB_READ ||
(flags & DB_RF_HAVESTRUCT) == 0);
DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
db, zio_t *, zio);
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
}
}
return (err);
}
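/*
* Usage sketch (hypothetical, for illustration): a typical synchronous,
* fallible read with prefetch suppressed:
*
*	err = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
*
* With zio == NULL, dbuf_read() creates and waits on its own zio_root()
* whenever physical I/O is required, so on return the dbuf is either
* DB_CACHED or err is set.
*/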
static void
dbuf_noread(dmu_buf_impl_t *db)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
} else if (db->db_state == DB_NOFILL) {
dbuf_clear_data(db);
} else {
ASSERT3U(db->db_state, ==, DB_CACHED);
}
mutex_exit(&db->db_mtx);
}
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
uint64_t txg = dr->dr_txg;
boolean_t release;
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* This assert is valid because dmu_sync() expects to be called by
* a zilog's get_data while holding a range lock. This call only
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
ASSERT(db->db_level == 0);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
return;
ASSERT(db->db_data_pending != dr);
/* free this block */
if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
zio_free(db->db_objset->os_spa, txg, bp);
release = !dr->dt.dl.dr_brtwrite;
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
dr->dt.dl.dr_nopwrite = B_FALSE;
dr->dt.dl.dr_brtwrite = B_FALSE;
dr->dt.dl.dr_has_raw_params = B_FALSE;
/*
* Release the already-written buffer, so we leave it in
* a consistent dirty state. Note that all callers are
* modifying the buffer, so they will immediately do
* another (redundant) arc_release(). Therefore, leave
* the buf thawed to save the effort of freezing &
* immediately re-thawing it.
*/
if (release)
arc_release(dr->dt.dl.dr_data, db);
}
/*
* Evict (if it's unreferenced) or clear (if it's referenced) any level-0
* data blocks in the free range, so that any future readers will find
* empty blocks.
*/
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db, *db_next;
uint64_t txg = tx->tx_txg;
avl_index_t where;
dbuf_dirty_record_t *dr;
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
(u_longlong_t)end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
db_search->db_blkid = start_blkid;
db_search->db_state = DB_SEARCH;
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
ASSERT3P(db, ==, NULL);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = db_next) {
db_next = AVL_NEXT(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
if (db->db_level != 0 || db->db_blkid > end_blkid) {
break;
}
ASSERT3U(db->db_blkid, >=, start_blkid);
/* found a level 0 buffer in the range */
mutex_enter(&db->db_mtx);
if (dbuf_undirty(db, tx)) {
/* mutex has been dropped and dbuf destroyed */
continue;
}
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
ASSERT(db->db.db_data == NULL);
mutex_exit(&db->db_mtx);
continue;
}
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/* will be handled in dbuf_read_done or dbuf_rele */
db->db_freed_in_flight = TRUE;
mutex_exit(&db->db_mtx);
continue;
}
if (zfs_refcount_count(&db->db_holds) == 0) {
ASSERT(db->db_buf);
dbuf_destroy(db);
continue;
}
/* The dbuf is referenced */
dr = list_head(&db->db_dirty_records);
if (dr != NULL) {
if (dr->dr_txg == txg) {
/*
* This buffer is "in-use", re-adjust the file
* size to reflect that this buffer may
* contain new data when we sync.
*/
if (db->db_blkid != DMU_SPILL_BLKID &&
db->db_blkid > dn->dn_maxblkid)
dn->dn_maxblkid = db->db_blkid;
dbuf_unoverride(dr);
} else {
/*
* This dbuf is not dirty in the open context.
* Either uncache it (if it's not referenced in
* the open context) or reset its contents to
* empty.
*/
dbuf_fix_old_data(db, txg);
}
}
/* clear the contents if it's cached */
if (db->db_state == DB_CACHED) {
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
mutex_exit(&db->db_mtx);
}
mutex_exit(&dn->dn_dbufs_mtx);
kmem_free(db_search, sizeof (dmu_buf_impl_t));
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
arc_buf_t *buf, *old_buf;
dbuf_dirty_record_t *dr;
int osize = db->db.db_size;
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dnode_t *dn;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* XXX we should be doing a dbuf_read, checking the return
* value and returning that up to our callers
*/
dmu_buf_will_dirty(&db->db, tx);
/* create the data buffer for the new block */
buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
/* copy old block data to the new block */
old_buf = db->db_buf;
memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
memset((uint8_t *)buf->b_data + osize, 0, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
arc_buf_destroy(old_buf, db);
db->db.db_size = size;
dr = list_head(&db->db_dirty_records);
/* dirty record added by dmu_buf_will_dirty() */
VERIFY(dr != NULL);
if (db->db_level == 0)
dr->dt.dl.dr_data = buf;
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
ASSERT3U(dr->dr_accounted, ==, osize);
dr->dr_accounted = size;
mutex_exit(&db->db_mtx);
dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
objset_t *os __maybe_unused = db->db_objset;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(arc_released(os->os_phys_buf) ||
list_link_active(&os->os_dsl_dataset->ds_synced_link));
ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
(void) arc_release(db->db_buf, db);
}
/*
* We already have a dirty record for this TXG, and we are being
* dirtied again.
*/
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
/*
* If this buffer has already been written out,
* we now need to reset its state.
*/
dbuf_unoverride(dr);
if (db->db.db_object != DMU_META_DNODE_OBJECT &&
db->db_state != DB_NOFILL) {
/* Already released on initial dirty, so just thaw. */
ASSERT(arc_released(db->db_buf));
arc_buf_thaw(db->db_buf);
}
}
}
dbuf_dirty_record_t *
dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
ASSERT(dn->dn_maxblkid >= blkid);
dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
dr->dr_txg = tx->tx_txg;
dr->dt.dll.dr_blkid = blkid;
dr->dr_accounted = dn->dn_datablksz;
/*
* There should not be any dbuf for the block that we're dirtying.
* Otherwise the buffer contents could be inconsistent between the
* dbuf and the lightweight dirty record.
*/
ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
NULL));
mutex_enter(&dn->dn_mtx);
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
}
if (dn->dn_nlevels == 1) {
ASSERT3U(blkid, <, dn->dn_nblkptr);
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
dnode_setdirty(dn, tx);
} else {
mutex_exit(&dn->dn_mtx);
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
1, blkid >> epbs, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (parent_db == NULL) {
kmem_free(dr, sizeof (*dr));
return (NULL);
}
int err = dbuf_read(parent_db, NULL,
(DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err != 0) {
dbuf_rele(parent_db, FTAG);
kmem_free(dr, sizeof (*dr));
return (NULL);
}
dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
dbuf_rele(parent_db, FTAG);
mutex_enter(&parent_dr->dt.di.dr_mtx);
ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
list_insert_tail(&parent_dr->dt.di.dr_children, dr);
mutex_exit(&parent_dr->dt.di.dr_mtx);
dr->dr_parent = parent_dr;
}
dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
return (dr);
}
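/*
 * Mark a dbuf dirty in the given transaction. If a dirty record for
 * this txg already exists, it is simply redirtied; otherwise a new
 * record is created, linked under the parent's dirty record (or
 * directly onto the dnode's dirty list for bonus, spill, and
 * top-level blocks), and the dnode itself is marked dirty.
 */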
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
objset_t *os;
dbuf_dirty_record_t *dr, *dr_next, *dr_head;
int txgoff = tx->tx_txg & TXG_MASK;
boolean_t drop_struct_rwlock = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DMU_TX_DIRTY_BUF(tx, db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Shouldn't dirty a regular buffer in syncing context. Private
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
ASSERT(!dmu_tx_is_syncing(tx) ||
BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
dn->dn_objset->os_dsl_dataset == NULL);
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
/*
* We make this assert for private objects as well, but after we
* check if we're already dirty. They are allowed to re-dirty
* in syncing context.
*/
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
* XXX make this true for indirects too? The problem is that
* transactions created with dmu_tx_create_assigned() from
* syncing context don't bother holding ahead.
*/
ASSERT(db->db_level != 0 ||
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
mutex_enter(&dn->dn_mtx);
dnode_set_dirtyctx(dn, tx, db);
if (tx->tx_txg > dn->dn_dirty_txg)
dn->dn_dirty_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
/*
* If this buffer is already dirty, we're done.
*/
dr_head = list_head(&db->db_dirty_records);
ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
db->db.db_object == DMU_META_DNODE_OBJECT);
dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
if (dr_next && dr_next->dr_txg == tx->tx_txg) {
DB_DNODE_EXIT(db);
dbuf_redirty(dr_next);
mutex_exit(&db->db_mtx);
return (dr_next);
}
/*
* Only valid if not already dirty.
*/
ASSERT(dn->dn_object == 0 ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
* We should only be dirtying in syncing context if it's the
* mos or we're initializing the os or it's a special object.
* However, we are allowed to dirty in syncing context provided
* we already dirtied it in open context. Hence we must make
* this assertion only if we're not already dirty.
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
ASSERT(db->db.db_size != 0);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
dmu_objset_willuse_space(os, db->db.db_size, tx);
}
/*
* If this buffer is dirty in an old transaction group we need
* to make a copy of it so that the changes we make in this
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
if (db->db_level == 0) {
void *data_old = db->db_buf;
if (db->db_state != DB_NOFILL) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db.db_data;
} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
/*
* Release the data buffer from the cache so
* that we can modify it without impacting
* possible other users of this cached data
* block. Note that indirect blocks and
* private objects are not released until the
* syncing state (since they are only modified
* then).
*/
arc_release(db->db_buf, db);
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db_buf;
}
ASSERT(data_old != NULL);
}
dr->dt.dl.dr_data = data_old;
} else {
mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
list_create(&dr->dt.di.dr_children,
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
dr->dr_accounted = db->db.db_size;
}
dr->dr_dbuf = db;
dr->dr_txg = tx->tx_txg;
list_insert_before(&db->db_dirty_records, dr_next, dr);
/*
* We could have been freed_in_flight between the dbuf_noread
* and dbuf_dirty. We win, as though the dbuf_noread() had
* happened after the free.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff],
db->db_blkid, 1);
}
mutex_exit(&dn->dn_mtx);
db->db_freed_in_flight = FALSE;
}
/*
* This buffer is now part of this txg
*/
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
db->db_dirtycnt += 1;
ASSERT3U(db->db_dirtycnt, <=, 3);
mutex_exit(&db->db_mtx);
if (db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_rwlock = B_TRUE;
}
/*
* If we are overwriting a dedup BP, then unless it is snapshotted,
* when we get to syncing context we will need to decrement its
* refcount in the DDT. Prefetch the relevant DDT block so that
* syncing context won't have to wait for the i/o.
*/
if (db->db_blkptr != NULL) {
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
ddt_prefetch(os->os_spa, db->db_blkptr);
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* We need to hold the dn_struct_rwlock to make this assertion,
* because it protects dn_phys / dn_next_nlevels from changing.
*/
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
dn->dn_phys->dn_nlevels > db->db_level ||
dn->dn_next_nlevels[txgoff] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
if (db->db_level == 0) {
ASSERT(!db->db_objset->os_raw_receive ||
dn->dn_maxblkid >= db->db_blkid);
dnode_new_blkid(dn, db->db_blkid, tx,
drop_struct_rwlock, B_FALSE);
ASSERT(dn->dn_maxblkid >= db->db_blkid);
}
if (db->db_level+1 < dn->dn_nlevels) {
dmu_buf_impl_t *parent = db->db_parent;
dbuf_dirty_record_t *di;
int parent_held = FALSE;
if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, FTAG);
ASSERT(parent != NULL);
parent_held = TRUE;
}
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
ASSERT3U(db->db_level + 1, ==, parent->db_level);
di = dbuf_dirty(parent, tx);
if (parent_held)
dbuf_rele(parent, FTAG);
mutex_enter(&db->db_mtx);
/*
* Since we've dropped the mutex, it's possible that
* dbuf_undirty() might have changed this out from under us.
*/
if (list_head(&db->db_dirty_records) == dr ||
dn->dn_object == DMU_META_DNODE_OBJECT) {
mutex_enter(&di->dt.di.dr_mtx);
ASSERT3U(di->dr_txg, ==, tx->tx_txg);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&di->dt.di.dr_children, dr);
mutex_exit(&di->dt.di.dr_mtx);
dr->dr_parent = di;
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_level + 1 == dn->dn_nlevels);
ASSERT(db->db_blkid < dn->dn_nblkptr);
ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
}
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
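/*
 * Tear down a bonus buffer's dirty record after it has been synced:
 * free the private copy of the data if one was made, unlink the
 * record, and drop the dbuf's dirty count.
 */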
static void
dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
if (dr->dt.dl.dr_data != db->db.db_data) {
struct dnode *dn = dr->dr_dnode;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
kmem_free(dr->dt.dl.dr_data, max_bonuslen);
arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
}
db->db_data_pending = NULL;
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
if (dr->dr_dbuf->db_level != 0) {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT3U(db->db_dirtycnt, >, 0);
db->db_dirtycnt -= 1;
}
/*
* Undirty a buffer in the transaction group referenced by the given
* transaction. Return whether this evicted the dbuf.
*/
boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
uint64_t txg = tx->tx_txg;
boolean_t brtwrite;
ASSERT(txg != 0);
/*
* Due to our use of dn_nlevels below, this can only be called
* in open context, unless we are operating on the MOS.
* From syncing context, dn_nlevels may be different from the
* dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* If this buffer is not dirty, we're done.
*/
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL)
return (B_FALSE);
ASSERT(dr->dr_dbuf == db);
brtwrite = dr->dt.dl.dr_brtwrite;
if (brtwrite) {
/*
* We are freeing a block that we cloned in the same
* transaction group.
*/
brt_pending_remove(dmu_objset_spa(db->db_objset),
&dr->dt.dl.dr_overridden_by, tx);
}
dnode_t *dn = dr->dr_dnode;
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
ASSERT(db->db.db_size != 0);
dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
dr->dr_accounted, txg);
list_remove(&db->db_dirty_records, dr);
/*
* Note that there are three places in dbuf_dirty()
* where this dirty record may be put on a list.
* Make sure to do a list_remove corresponding to
* every one of those list_insert calls.
*/
if (dr->dr_parent) {
mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
list_remove(&dr->dr_parent->dt.di.dr_children, dr);
mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
} else if (db->db_blkid == DMU_SPILL_BLKID ||
db->db_level + 1 == dn->dn_nlevels) {
ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
mutex_exit(&dn->dn_mtx);
}
if (db->db_state != DB_NOFILL && !brtwrite) {
dbuf_unoverride(dr);
ASSERT(db->db_buf != NULL);
ASSERT(dr->dt.dl.dr_data != NULL);
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
ASSERT(db->db_state == DB_NOFILL || brtwrite ||
arc_released(db->db_buf));
dbuf_destroy(db);
return (B_TRUE);
}
return (B_FALSE);
}
static void
dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
boolean_t undirty = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
/*
* Quick check for dirtiness. For already dirty blocks, this
* reduces runtime of this function by >90%, and overall performance
* by 50% for some workloads (e.g. file deletion with indirect blocks
* cached).
*/
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
/*
* It's possible that it is already dirty but not cached,
* because there are some calls to dbuf_dirty() that don't
* go through dmu_buf_will_dirty().
*/
if (dr != NULL) {
if (dr->dt.dl.dr_brtwrite) {
/*
* Block cloning: If we are dirtying a cloned
* block, we cannot simply redirty it, because
* this dr has no data associated with it.
* We will go through a full undirtying below,
* before dirtying it again.
*/
undirty = B_TRUE;
} else {
/* This dbuf is already dirty and cached. */
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return;
}
}
}
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
flags |= DB_RF_HAVESTRUCT;
DB_DNODE_EXIT(db);
/*
* Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
* want to make sure dbuf_read() will read the pending cloned block and
* not the underlying block that is being replaced. dbuf_undirty() will
* do dbuf_unoverride(), so we will end up with the cloned block content,
* without the overridden BP.
*/
(void) dbuf_read(db, NULL, flags);
if (undirty) {
mutex_enter(&db->db_mtx);
VERIFY(!dbuf_undirty(db, tx));
mutex_exit(&db->db_mtx);
}
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
}
boolean_t
dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
mutex_enter(&db->db_mtx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
mutex_exit(&db->db_mtx);
return (dr != NULL);
}
void
dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
/*
* Block cloning: We are going to clone into this block, so undirty
* modifications done to this block so far in this txg. This includes
* writes and clones into this block.
*/
mutex_enter(&db->db_mtx);
VERIFY(!dbuf_undirty(db, tx));
- ASSERT(list_head(&db->db_dirty_records) == NULL);
+ ASSERT3P(dbuf_find_dirty_eq(db, tx->tx_txg), ==, NULL);
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
mutex_exit(&db->db_mtx);
dmu_buf_will_not_fill(db_fake, tx);
}
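/*
 * Dirty a buffer whose new contents will be provided without reading
 * the existing block (e.g. by an override or embedded block pointer,
 * as in dmu_buf_write_embedded() and dmu_buf_redact()); the dbuf is
 * placed in the NOFILL state.
 */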
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db, "allocating NOFILL buffer");
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
ASSERT(db->db_level == 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
dmu_tx_private_ok(tx));
mutex_enter(&db->db_mtx);
if (db->db_state == DB_NOFILL) {
/*
* Block cloning: We will be completely overwriting a block
* cloned in this transaction group, so let's undirty the
* pending clone and mark the block as uncached. This will be
* as if the clone was never done.
*/
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
/*
* This function is effectively the same as dmu_buf_will_dirty(), but
* indicates the caller expects raw encrypted data in the db, and provides
* the crypt params (byteorder, salt, iv, mac) which should be stored in the
* blkptr_t when this dbuf is written. This is only used for blocks of
* dnodes, during raw receive.
*/
void
dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
/*
* dr_has_raw_params is only processed for blocks of dnodes
* (see dbuf_sync_dnode_leaf_crypt()).
*/
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
ASSERT(db->db_objset->os_raw_receive);
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
ASSERT3P(dr, !=, NULL);
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
}
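/*
 * Point the head dirty record's override BP at the given block
 * pointer, mark the record DR_OVERRIDDEN, and stamp the BP with this
 * txg's birth time.
 */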
static void
dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
{
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
dr = list_head(&db->db_dirty_records);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
dl->dr_overridden_by = *bp;
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
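/*
 * Complete a fill: transition the dbuf to DB_CACHED and wake any
 * waiters. If the block was freed while it was being filled, its
 * contents are zeroed instead.
 */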
void
dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
(void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dbuf_states_t old_state;
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
old_state = db->db_state;
db->db_state = DB_CACHED;
if (old_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
} else {
DTRACE_SET_STATE(db, "fill done");
}
cv_broadcast(&db->db_changed);
}
mutex_exit(&db->db_mtx);
}
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
struct dirty_leaf *dl;
dmu_object_type_t type;
dbuf_dirty_record_t *dr;
if (etype == BP_EMBEDDED_TYPE_DATA) {
ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
SPA_FEATURE_EMBEDDED_DATA));
}
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
dmu_buf_will_not_fill(dbuf, tx);
dr = list_head(&db->db_dirty_records);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
encode_embedded_bp_compressed(&dl->dr_overridden_by,
data, comp, uncompressed_size, compressed_size);
BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
BP_SET_TYPE(&dl->dr_overridden_by, type);
BP_SET_LEVEL(&dl->dr_overridden_by, 0);
BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
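/*
 * Redact this block: dirty it as NOFILL and override its block
 * pointer with a redacted BP recording only the block's type and
 * logical size.
 */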
void
dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dmu_object_type_t type;
ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
dmu_buf_will_not_fill(dbuf, tx);
blkptr_t bp = { { { {0} } } };
BP_SET_TYPE(&bp, type);
BP_SET_LEVEL(&bp, 0);
BP_SET_BIRTH(&bp, tx->tx_txg, 0);
BP_SET_REDACTED(&bp);
BPE_SET_LSIZE(&bp, dbuf->db_size);
dbuf_override_impl(db, &bp, tx);
}
/*
* Directly assign a provided arc buf to a given dbuf if it's not referenced
* by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
*/
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_level == 0);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
ASSERT(tx->tx_txg != 0);
arc_return_buf(buf, db);
ASSERT(arc_released(buf));
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
if (db->db_state == DB_CACHED &&
zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
/*
* In practice, we will never have a case where we have an
* encrypted arc buffer while additional holds exist on the
* dbuf. We don't handle this here so we simply assert that
* fact instead.
*/
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
memcpy(db->db.db_data, buf->b_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
arc_release(db->db_buf, db);
}
dr->dt.dl.dr_data = buf;
arc_buf_destroy(db->db_buf, db);
} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
arc_release(db->db_buf, db);
arc_buf_destroy(db->db_buf, db);
}
db->db_buf = NULL;
}
ASSERT(db->db_buf == NULL);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
dmu_buf_fill_done(&db->db, tx);
}
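/*
 * Final teardown of a dbuf with no holds: free its ARC buffer (or
 * bonus data), remove it from any dbuf cache list, the hash table,
 * and the dnode's dbuf tree, release its holds on the dnode and
 * parent, and free the structure itself.
 */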
void
dbuf_destroy(dmu_buf_impl_t *db)
{
dnode_t *dn;
dmu_buf_impl_t *parent = db->db_parent;
dmu_buf_impl_t *dndb;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(zfs_refcount_is_zero(&db->db_holds));
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
if (db->db_blkid == DMU_BONUS_BLKID) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
if (db->db.db_data != NULL) {
kmem_free(db->db.db_data, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "buffer cleared");
}
}
dbuf_clear_data(db);
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_data_pending == NULL);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
DTRACE_SET_STATE(db, "buffer eviction started");
db->db_blkptr = NULL;
/*
* Now that db_state is DB_EVICTING, nobody else can find this via
* the hash table. We can now drop db_mtx, which allows us to
* acquire the dn_dbufs_mtx.
*/
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dndb = dn->dn_dbuf;
if (db->db_blkid != DMU_BONUS_BLKID) {
boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
if (needlock)
mutex_enter_nested(&dn->dn_dbufs_mtx,
NESTED_SINGLE);
avl_remove(&dn->dn_dbufs, db);
membar_producer();
DB_DNODE_EXIT(db);
if (needlock)
mutex_exit(&dn->dn_dbufs_mtx);
/*
* Decrementing the dbuf count means that the hold corresponding
* to the removed dbuf is no longer discounted in dnode_move(),
* so the dnode cannot be moved until after we release the hold.
* The membar_producer() ensures visibility of the decremented
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, db, B_TRUE);
db->db_dnode_handle = NULL;
dbuf_hash_remove(db);
} else {
DB_DNODE_EXIT(db);
}
ASSERT(zfs_refcount_is_zero(&db->db_holds));
db->db_parent = NULL;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
ASSERT(db->db_hash_next == NULL);
ASSERT(db->db_blkptr == NULL);
ASSERT(db->db_data_pending == NULL);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
/*
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb) {
mutex_enter(&parent->db_mtx);
dbuf_rele_and_unlock(parent, db, B_TRUE);
}
kmem_cache_free(dbuf_kmem_cache, db);
arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
}
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
* this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
*parentp = NULL;
*bpp = NULL;
ASSERT(blkid != DMU_BONUS_BLKID);
if (blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_have_spill &&
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
else
*bpp = NULL;
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
mutex_exit(&dn->dn_mtx);
return (0);
}
int nlevels =
(dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(level * epbs, <, 64);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
/*
* This assertion shouldn't trip as long as the max indirect block size
* is less than 1M. The reason for this is that up to that point,
* the number of levels required to address an entire object with blocks
* of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
* other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
* (i.e. we can address the entire object), objects will all use at most
* N-1 levels and the assertion won't overflow. However, once epbs is
* 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
* enough to address an entire object, so objects will have 5 levels,
* but then this assertion will overflow.
*
* All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
* need to redo this logic to handle overflows.
*/
ASSERT(level >= nlevels ||
((nlevels - level - 1) * epbs) +
highbit64(dn->dn_phys->dn_nblkptr) <= 64);
if (level >= nlevels ||
blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
((nlevels - level - 1) * epbs)) ||
(fail_sparse &&
blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
err = dbuf_hold_impl(dn, level + 1,
blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
if (err)
return (err);
err = dbuf_read(*parentp, NULL,
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err) {
dbuf_rele(*parentp, NULL);
*parentp = NULL;
return (err);
}
rw_enter(&(*parentp)->db_rwlock, RW_READER);
*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
(blkid & ((1ULL << epbs) - 1));
if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
ASSERT(BP_IS_HOLE(*bpp));
rw_exit(&(*parentp)->db_rwlock);
return (0);
} else {
/* the block is referenced from the dnode */
ASSERT3U(level, ==, nlevels-1);
ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
blkid < dn->dn_phys->dn_nblkptr);
if (dn->dn_dbuf) {
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
}
*bpp = &dn->dn_phys->dn_blkptr[blkid];
return (0);
}
}
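/*
 * Allocate and initialize a dbuf for the given block, and insert it
 * into the hash table and the dnode's dbuf tree. If another thread
 * won the race to insert a dbuf for the same block, free ours and
 * return the winner. The bonus dbuf is special-cased: it is sized
 * from the dnode's slot count and never placed in the hash table.
 */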
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
{
objset_t *os = dn->dn_objset;
dmu_buf_impl_t *db, *odb;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);
db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dbuf_node));
db->db_objset = os;
db->db.db_object = dn->dn_object;
db->db_level = level;
db->db_blkid = blkid;
db->db_dirtycnt = 0;
db->db_dnode_handle = dn->dn_handle;
db->db_parent = parent;
db->db_blkptr = blkptr;
db->db_hash = hash;
db->db_user = NULL;
db->db_user_immediate_evict = FALSE;
db->db_freed_in_flight = FALSE;
db->db_pending_evict = FALSE;
if (blkid == DMU_BONUS_BLKID) {
ASSERT3P(parent, ==, dn->dn_dbuf);
db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
db->db.db_offset = DMU_BONUS_BLKID;
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "bonus buffer created");
db->db_caching_status = DB_NO_CACHE;
/* the bonus dbuf is not placed in the hash table */
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
return (db);
} else if (blkid == DMU_SPILL_BLKID) {
db->db.db_size = (blkptr != NULL) ?
BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
db->db.db_offset = 0;
} else {
int blocksize =
db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
db->db.db_size = blocksize;
db->db.db_offset = db->db_blkid * blocksize;
}
/*
* Hold the dn_dbufs_mtx while we insert the new dbuf
* into the hash table *and* add it to the dbufs list.
* This prevents a possible deadlock with someone
* trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
db->db_state = DB_EVICTING; /* not worth logging this state change */
if ((odb = dbuf_hash_insert(db)) != NULL) {
/* someone else inserted it first */
mutex_exit(&dn->dn_dbufs_mtx);
kmem_cache_free(dbuf_kmem_cache, db);
DBUF_STAT_BUMP(hash_insert_race);
return (odb);
}
avl_add(&dn->dn_dbufs, db);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "regular buffer created");
db->db_caching_status = DB_NO_CACHE;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
if (parent && parent != dn->dn_dbuf)
dbuf_add_ref(parent, db);
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
zfs_refcount_count(&dn->dn_holds) > 0);
(void) zfs_refcount_add(&dn->dn_holds, db);
dprintf_dbuf(db, "db=%p\n", db);
return (db);
}
/*
* This function returns a block pointer and information about the object,
* given a dnode and a block. This is a publicly accessible version of
* dbuf_findbp that only returns some information, rather than the
* dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
* should be locked as (at least) a reader.
*/
int
dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
{
dmu_buf_impl_t *dbp = NULL;
blkptr_t *bp2;
int err = 0;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
if (err == 0) {
ASSERT3P(bp2, !=, NULL);
*bp = *bp2;
if (dbp != NULL)
dbuf_rele(dbp, NULL);
if (datablkszsec != NULL)
*datablkszsec = dn->dn_phys->dn_datablkszsec;
if (indblkshift != NULL)
*indblkshift = dn->dn_phys->dn_indblkshift;
}
return (err);
}
typedef struct dbuf_prefetch_arg {
spa_t *dpa_spa; /* The spa to issue the prefetch in. */
zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
int dpa_curlevel; /* The current level that we're reading */
dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
void *dpa_arg; /* prefetch completion arg */
} dbuf_prefetch_arg_t;
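/* Invoke the completion callback, if any, and free the prefetch state. */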
static void
dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
{
if (dpa->dpa_cb != NULL) {
dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
dpa->dpa_zb.zb_blkid, io_done);
}
kmem_free(dpa, sizeof (*dpa));
}
static void
dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zio, (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
if (abuf != NULL)
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
}
/*
* Actually issue the prefetch read for the block given.
*/
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (dbuf_prefetch_fini(dpa, B_FALSE));
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags =
dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
ARC_FLAG_NO_BUF;
/* dnodes are always read as raw and then converted later */
if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
dpa->dpa_curlevel == 0)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
ASSERT(dpa->dpa_zio != NULL);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
dbuf_issue_final_prefetch_done, dpa,
dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
}
/*
* Called when an indirect block above our prefetch target is read in. This
* will either read in the next indirect block down the tree or issue the actual
* prefetch if the next block down is our target.
*/
static void
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
ASSERT3S(dpa->dpa_curlevel, >, 0);
if (abuf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
}
ASSERT(zio == NULL || zio->io_error == 0);
/*
* The dpa_dnode is only valid if we are called with a NULL
* zio. This indicates that the arc_read() returned without
* first calling zio_read() to issue a physical read. Once
* a physical read is made the dpa_dnode must be invalidated
* as the locks guarding it may have been dropped. If the
* dpa_dnode is still valid, then we want to add it to the dbuf
* cache. To do so, we must hold the dbuf associated with the block
* we just prefetched, read its contents so that we associate it
* with an arc_buf_t, and then release it.
*/
if (zio != NULL) {
ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
} else {
ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
}
ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
dpa->dpa_dnode = NULL;
} else if (dpa->dpa_dnode != NULL) {
uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel -
dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG);
if (db == NULL) {
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
}
(void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG);
}
dpa->dpa_curlevel--;
uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS)));
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
dbuf_issue_final_prefetch(dpa, bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
arc_buf_destroy(abuf, private);
}
/*
* Issue prefetch reads for the given block on the given level. If the indirect
* blocks above that block are not in memory, we will read them in
* asynchronously. As a result, this call never blocks waiting for a read to
* complete. Note that the prefetch might fail if the dataset is encrypted and
* the encryption key is unmapped before the IO completes.
*/
int
dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg)
{
blkptr_t bp;
int epbs, nlevels, curlevel;
uint64_t curblkid;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
goto no_issue;
if (level == 0 && dnode_block_freed(dn, blkid))
goto no_issue;
/*
* This dnode hasn't been written to disk yet, so there's nothing to
* prefetch.
*/
nlevels = dn->dn_phys->dn_nlevels;
if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
goto no_issue;
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
goto no_issue;
dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
level, blkid, NULL);
if (db != NULL) {
mutex_exit(&db->db_mtx);
/*
* This dbuf already exists. It is either CACHED, or
* (we assume) about to be read or filled.
*/
goto no_issue;
}
/*
* Find the closest ancestor (indirect block) of the target block
* that is present in the cache. In this indirect block, we will
* find the bp that is at curlevel, curblkid.
*/
curlevel = level;
curblkid = blkid;
while (curlevel < nlevels - 1) {
int parent_level = curlevel + 1;
uint64_t parent_blkid = curblkid >> epbs;
dmu_buf_impl_t *db;
if (dbuf_hold_impl(dn, parent_level, parent_blkid,
FALSE, TRUE, FTAG, &db) == 0) {
blkptr_t *bpp = db->db_buf->b_data;
bp = bpp[P2PHASE(curblkid, 1 << epbs)];
dbuf_rele(db, FTAG);
break;
}
curlevel = parent_level;
curblkid = parent_blkid;
}
if (curlevel == nlevels - 1) {
/* No cached indirect blocks found. */
ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
bp = dn->dn_phys->dn_blkptr[curblkid];
}
ASSERT(!BP_IS_REDACTED(&bp) ||
dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
goto no_issue;
ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
ZIO_FLAG_CANFAIL);
dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, level, blkid);
dpa->dpa_curlevel = curlevel;
dpa->dpa_prio = prio;
dpa->dpa_aflags = aflags;
dpa->dpa_spa = dn->dn_objset->os_spa;
dpa->dpa_dnode = dn;
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
dpa->dpa_cb = cb;
dpa->dpa_arg = arg;
if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
else if (dnode_level_is_l2cacheable(&bp, dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
* a higher level, though, we want to issue the prefetches for all the
* indirect blocks asynchronously, so we can go on with whatever we were
* doing.
*/
if (curlevel == level) {
ASSERT3U(curblkid, ==, blkid);
dbuf_issue_final_prefetch(dpa, &bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dnode_level_is_l2cacheable(&bp, dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
&bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
/*
* We use pio here instead of dpa_zio since it's possible that
* dpa may have already been freed.
*/
zio_nowait(pio);
return (1);
no_issue:
if (cb != NULL)
cb(arg, level, blkid, B_FALSE);
return (0);
}
int
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t aflags)
{
return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
}
/*
* Helper function for dbuf_hold_impl() to copy a buffer. Handles
* the case of encrypted, compressed and uncompressed buffers by
* allocating the new buffer, respectively, with arc_alloc_raw_buf(),
arc_alloc_compressed_buf() or arc_alloc_buf().
*
* NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
*/
noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
dbuf_dirty_record_t *dr = db->db_data_pending;
arc_buf_t *data = dr->dt.dl.dr_data;
enum zio_compress compress_type = arc_get_compression(data);
uint8_t complevel = arc_get_complevel(data);
if (arc_is_encrypted(data)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(data, &byteorder, salt, iv, mac);
dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
compress_type, complevel));
} else if (compress_type != ZIO_COMPRESS_OFF) {
dbuf_set_data(db, arc_alloc_compressed_buf(
dn->dn_objset->os_spa, db, arc_buf_size(data),
arc_buf_lsize(data), compress_type, complevel));
} else {
dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
rw_enter(&db->db_rwlock, RW_WRITER);
memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
/*
* Returns with db_holds incremented, and db_mtx not held.
* Note: dn_struct_rwlock must be held.
*/
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
const void *tag, dmu_buf_impl_t **dbp)
{
dmu_buf_impl_t *db, *parent = NULL;
uint64_t hv;
/* If the pool has been created, verify the tx_sync_lock is not held */
spa_t *spa = dn->dn_objset->os_spa;
dsl_pool_t *dp = spa->spa_dsl_pool;
if (dp != NULL) {
ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
}
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT3U(dn->dn_nlevels, >, level);
*dbp = NULL;
/* dbuf_find() returns with db_mtx held */
db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
if (db == NULL) {
blkptr_t *bp = NULL;
int err;
if (fail_uncached)
return (SET_ERROR(ENOENT));
ASSERT3P(parent, ==, NULL);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
err = SET_ERROR(ENOENT);
if (err) {
if (parent)
dbuf_rele(parent, NULL);
return (err);
}
}
if (err && err != ENOENT)
return (err);
db = dbuf_create(dn, level, blkid, parent, bp, hv);
}
if (fail_uncached && db->db_state != DB_CACHED) {
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
if (db->db_buf != NULL) {
arc_buf_access(db->db_buf);
ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
}
ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
/*
* If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
db->db_state == DB_CACHED && db->db_data_pending) {
dbuf_dirty_record_t *dr = db->db_data_pending;
if (dr->dt.dl.dr_data == db->db_buf) {
ASSERT3P(db->db_buf, !=, NULL);
dbuf_hold_copy(dn, db);
}
}
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
(void) zfs_refcount_add(&db->db_holds, tag);
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
/* NOTE: we can't rele the parent until after we drop the db_mtx */
if (parent)
dbuf_rele(parent, NULL);
ASSERT3P(DB_DNODE(db), ==, dn);
ASSERT3U(db->db_blkid, ==, blkid);
ASSERT3U(db->db_level, ==, level);
*dbp = db;
return (0);
}
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_bonus == NULL);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
}
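/*
 * Set the block size of a dnode's spill block. Returns ENOTSUP if the
 * dbuf is not the spill block; the size is rounded up to a multiple
 * of SPA_MINBLOCKSIZE before the resize.
 */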
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
if (db->db_blkid != DMU_SPILL_BLKID)
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
dbuf_new_size(db, blksz, tx);
return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
{
int64_t holds = zfs_refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
}
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
const void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
boolean_t result = B_FALSE;
if (blkid == DMU_BONUS_BLKID)
found_db = dbuf_find_bonus(os, obj);
else
found_db = dbuf_find(os, obj, 0, blkid, NULL);
if (found_db != NULL) {
if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
(void) zfs_refcount_add(&db->db_holds, tag);
result = B_TRUE;
}
mutex_exit(&found_db->db_mtx);
}
return (result);
}
/*
* If you call dbuf_rele() you had better not be referencing the dnode handle
* unless you have some other direct or indirect hold on the dnode. (An indirect
* hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
* Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
* dnode's parent dbuf evicting its dnode handles.
*/
void
dbuf_rele(dmu_buf_impl_t *db, const void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
}
void
dmu_buf_rele(dmu_buf_t *db, const void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically. The 'evicting'
* argument should be set if we are already in the dbuf-evicting code
* path, in which case we don't want to recursively evict. This allows us to
* avoid deeply nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
{
int64_t holds;
uint64_t size;
ASSERT(MUTEX_HELD(&db->db_mtx));
DBUF_VERIFY(db);
/*
* Remove the reference to the dbuf before removing its hold on the
* dnode so we can guarantee in dnode_move() that a referenced bonus
* buffer has a corresponding dnode hold.
*/
holds = zfs_refcount_remove(&db->db_holds, tag);
ASSERT(holds >= 0);
/*
* We can't freeze indirects if there is a possibility that they
* may be modified in the current syncing context.
*/
if (db->db_buf != NULL &&
holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
arc_buf_freeze(db->db_buf);
}
if (holds == db->db_dirtycnt &&
db->db_level == 0 && db->db_user_immediate_evict)
dbuf_evict_user(db);
if (holds == 0) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn;
boolean_t evict_dbuf = db->db_pending_evict;
/*
* If the dnode moves here, we cannot cross this
* barrier until the move completes.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
atomic_dec_32(&dn->dn_dbufs_count);
/*
* Decrementing the dbuf count means that the bonus
* buffer's dnode hold is no longer discounted in
* dnode_move(). The dnode cannot move until after
* the dnode_rele() below.
*/
DB_DNODE_EXIT(db);
/*
* Do not reference db after its lock is dropped.
* Another thread may evict it.
*/
mutex_exit(&db->db_mtx);
if (evict_dbuf)
dnode_evict_bonus(dn);
dnode_rele(dn, db);
} else if (db->db_buf == NULL) {
/*
* This is a special case: we never associated this
* dbuf with any data allocated from the ARC.
*/
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
dbuf_destroy(db);
} else if (arc_released(db->db_buf)) {
/*
* This dbuf has anonymous data associated with it.
*/
dbuf_destroy(db);
} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
db->db_pending_evict) {
dbuf_destroy(db);
} else if (!multilist_link_active(&db->db_cache_link)) {
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
dbuf_cached_state_t dcs =
dbuf_include_in_metadata_cache(db) ?
DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
db->db_caching_status = dcs;
multilist_insert(&dbuf_caches[dcs].cache, db);
uint64_t db_size = db->db.db_size;
size = zfs_refcount_add_many(
&dbuf_caches[dcs].size, db_size, db);
uint8_t db_level = db->db_level;
mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMP(metadata_cache_count);
DBUF_STAT_MAX(metadata_cache_size_bytes_max,
size);
} else {
DBUF_STAT_BUMP(cache_count);
DBUF_STAT_MAX(cache_size_bytes_max, size);
DBUF_STAT_BUMP(cache_levels[db_level]);
DBUF_STAT_INCR(cache_levels_bytes[db_level],
db_size);
}
if (dcs == DB_DBUF_CACHE && !evicting)
dbuf_evict_notify(size);
}
} else {
mutex_exit(&db->db_mtx);
}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
return (zfs_refcount_count(&db->db_holds));
}
uint64_t
dmu_buf_user_refcount(dmu_buf_t *db_fake)
{
uint64_t holds;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
mutex_exit(&db->db_mtx);
return (holds);
}
void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
dmu_buf_user_t *new_user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
dbuf_verify_user(db, DBVU_NOT_EVICTING);
if (db->db_user == old_user)
db->db_user = new_user;
else
old_user = db->db_user;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
mutex_exit(&db->db_mtx);
return (old_user);
}
void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, NULL, user));
}
void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_user_immediate_evict = TRUE;
return (dmu_buf_set_user(db_fake, user));
}
void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, user, NULL));
}
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
return (db->db_user);
}
void
dmu_buf_user_evict_wait(void)
{
taskq_wait(dbu_evict_taskq);
}
blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_blkptr);
}
objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_objset);
}
dnode_t *
dmu_buf_dnode_enter(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_ENTER(dbi);
return (DB_DNODE(dbi));
}
void
dmu_buf_dnode_exit(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_EXIT(dbi);
}
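/*
 * Late-bind db_blkptr for a buffer that was created before its parent
 * existed: spill blocks point at the dnode's spill BP, top-level
 * blocks at the dnode's blkptr array, and everything else into the
 * data of the level+1 parent indirect block (held here if necessary).
 */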
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
/* ASSERT(dmu_tx_is_syncing(tx)) */
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_blkptr != NULL)
return;
if (db->db_blkid == DMU_SPILL_BLKID) {
db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
BP_ZERO(db->db_blkptr);
return;
}
if (db->db_level == dn->dn_phys->dn_nlevels-1) {
/*
* This buffer was allocated at a time when there were
* no available blkptrs from the dnode, or it was
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
} else {
dmu_buf_impl_t *parent = db->db_parent;
int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT(dn->dn_phys->dn_nlevels > 1);
if (parent == NULL) {
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, db);
rw_exit(&dn->dn_struct_rwlock);
mutex_enter(&db->db_mtx);
db->db_parent = parent;
}
db->db_blkptr = (blkptr_t *)parent->db.db_data +
(db->db_blkid & ((1ULL << epbs) - 1));
DBUF_VERIFY(db);
}
}
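/*
 * Sync a bonus buffer's dirty record: copy the data into the dnode's
 * on-disk bonus area, then undirty the record and release the hold
 * that was taken when the buffer was dirtied.
 */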
static void
dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
void *data = dr->dt.dl.dr_data;
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_blkid == DMU_BONUS_BLKID);
ASSERT(data != NULL);
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
dbuf_undirty_bonus(dr);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}
/*
* When syncing out a block of dnodes, adjust the block to deal with
* encryption. Normally, we make sure the block is decrypted before writing
* it. If we have crypt params, then we are writing a raw (encrypted) block,
* from a raw receive. In this case, set the ARC buf's crypt params so
* that the BP will be filled with the correct byteorder, salt, iv, and mac.
*/
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
int err;
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
/*
* Unfortunately, there is currently no mechanism for
* syncing context to handle decryption errors. An error
* here is only possible if an attacker maliciously
* changed a dnode block and updated the associated
* checksums going up the block tree.
*/
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
err = arc_untransform(db->db_buf, db->db_objset->os_spa,
&zb, B_TRUE);
if (err)
panic("Invalid dnode block MAC");
} else if (dr->dt.dl.dr_has_raw_params) {
(void) arc_release(dr->dt.dl.dr_data, db);
arc_convert_to_raw(dr->dt.dl.dr_data,
dmu_objset_id(db->db_objset),
dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
}
}
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
* is critical that we not allow the compiler to inline this function into
* dbuf_sync_list(), thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
ASSERT(db->db_level > 0);
DBUF_VERIFY(db);
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
ASSERT3U(db->db_state, ==, DB_CACHED);
ASSERT(db->db_buf != NULL);
/* Indirect block size must match what the dnode thinks it is. */
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
dbuf_check_blkptr(dn, db);
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, db->db_buf, tx);
zio_t *zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
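/*
 * Recursion sketch (illustrative): syncing a dirty level-2 indirect issues
 * its own write, then walks dr_children and calls dbuf_sync_list() at
 * level 1; each level-1 child does the same until dbuf_sync_leaf() is
 * reached at level 0. Keeping dbuf_sync_indirect() and dbuf_sync_leaf()
 * out-of-line (noinline) bounds the per-frame cost of this recursion.
 */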
/*
* Verify that the size of the data in our bonus buffer does not exceed
* its recorded size.
*
* The purpose of this verification is to catch any cases in development
* where the size of a phys structure (e.g., space_map_phys_t) grows and,
* due to incorrect feature management, older pools expect to read more
* data even though they didn't actually write it to begin with.
*
* For example, this would catch an error in the feature logic where we
* open an older pool and we expect to write the space map histogram of
* a space map with size SPACE_MAP_SIZE_V0.
*/
static void
dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
{
#ifdef ZFS_DEBUG
dnode_t *dn = dr->dr_dnode;
/*
* Encrypted bonus buffers can have data past their bonuslen.
* Skip the verification of these blocks.
*/
if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
return;
uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(bonuslen, <=, maxbonuslen);
arc_buf_t *datap = dr->dt.dl.dr_data;
char *datap_end = ((char *)datap) + bonuslen;
char *datap_max = ((char *)datap) + maxbonuslen;
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
ASSERT(*datap_end == 0);
#endif
}
static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
ASSERT3P(dr->dr_dbuf, ==, NULL);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
} else {
dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
VERIFY3U(parent_db->db_level, ==, 1);
VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
blkptr_t *bp = parent_db->db.db_data;
return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
}
}
static void
dbuf_lightweight_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
blkptr_t *bp = zio->io_bp;
if (zio->io_error != 0)
return;
dnode_t *dn = dr->dr_dnode;
blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
spa_t *spa = dmu_objset_spa(dn->dn_objset);
int64_t delta = bp_get_dsize_sync(spa, bp) -
bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta);
uint64_t blkid = dr->dt.dll.dr_blkid;
mutex_enter(&dn->dn_mtx);
if (blkid > dn->dn_phys->dn_maxblkid) {
ASSERT0(dn->dn_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = blkid;
}
mutex_exit(&dn->dn_mtx);
if (!BP_IS_EMBEDDED(bp)) {
uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
BP_SET_FILL(bp, fill);
}
dmu_buf_impl_t *parent_db;
EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
if (dr->dr_parent == NULL) {
parent_db = dn->dn_dbuf;
} else {
parent_db = dr->dr_parent->dr_dbuf;
}
rw_enter(&parent_db->db_rwlock, RW_WRITER);
*bp_orig = *bp;
rw_exit(&parent_db->db_rwlock);
}
static void
dbuf_lightweight_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
VERIFY0(zio->io_error);
objset_t *os = dr->dr_dnode->dn_objset;
dmu_tx_t *tx = os->os_synctx;
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, zio->io_bp, tx);
}
dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
zio->io_txg);
abd_free(dr->dt.dll.dr_abd);
kmem_free(dr, sizeof (*dr));
}
noinline static void
dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dnode_t *dn = dr->dr_dnode;
zio_t *pio;
if (dn->dn_phys->dn_nlevels == 1) {
pio = dn->dn_zio;
} else {
pio = dr->dr_parent->dr_zio;
}
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(dn->dn_objset),
.zb_object = dn->dn_object,
.zb_level = 0,
.zb_blkid = dr->dt.dll.dr_blkid,
};
/*
* See comment in dbuf_write(). This is so that zio->io_bp_orig
* will have the old BP in dbuf_lightweight_done().
*/
dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
&dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
zio_nowait(dr->dr_zio);
}
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
* critical that we not allow the compiler to inline this function into
* dbuf_sync_list(), thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
uint64_t txg = tx->tx_txg;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
/*
* To be synced, we must be dirtied. But we
* might have been freed after the dirty.
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
ASSERT(db->db.db_data == NULL);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
+ } else if (db->db_state == DB_READ) {
+ /*
+ * This buffer has a clone we need to write, and an in-flight
+ * read on the BP we're about to clone. It's safe to issue the
+ * write here because the read has already been issued and the
+ * contents won't change.
+ */
+ ASSERT(dr->dt.dl.dr_brtwrite &&
+ dr->dt.dl.dr_override_state == DR_OVERRIDDEN);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
}
DBUF_VERIFY(db);
if (db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
/*
* In the previous transaction group, the bonus buffer
* was entirely used to store the attributes for the
* dnode which overrode the dn_spill field. However,
* when adding more attributes to the file a spill
* block was required to hold the extra attributes.
*
* Make sure to clear the garbage left in the dn_spill
* field from the previous attributes in the bonus
* buffer. Otherwise, after writing out the spill
* block to the new allocated dva, it will free
* the old block pointed to by the invalid dn_spill.
*/
db->db_blkptr = NULL;
}
dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/*
* If this is a bonus buffer, simply copy the bonus data into the
* dnode. It will be written out when the dnode is synced (and it
* will be synced, since it must have been dirty for dbuf_sync to
* be called).
*/
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dr->dr_dbuf == db);
dbuf_sync_bonus(dr, tx);
return;
}
os = dn->dn_objset;
/*
* This function may have dropped the db_mtx lock allowing a dmu_sync
* operation to sneak in. As a result, we need to ensure that we
* don't check the dr_override_state until we have returned from
* dbuf_check_blkptr.
*/
dbuf_check_blkptr(dn, db);
/*
* If this buffer is in the middle of an immediate write,
* wait for the synchronous IO to complete.
*/
while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
cv_wait(&db->db_changed, &db->db_mtx);
}
/*
* If this is a dnode block, ensure it is appropriately encrypted
* or decrypted, depending on what we are writing to it this txg.
*/
if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
dbuf_prepare_encrypted_dnode_leaf(dr);
if (db->db_state != DB_NOFILL &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
zfs_refcount_count(&db->db_holds) > 1 &&
dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
*datap == db->db_buf) {
/*
* If this buffer is currently "in use" (i.e., there
* are active holds and db_data still references it),
* then make a copy before we start the write so that
* any modifications from the open txg will not leak
* into this write.
*
* NOTE: this copy does not need to be made for
* objects only modified in the syncing context (e.g.
* DNODE blocks).
*/
int psize = arc_buf_size(*datap);
int lsize = arc_buf_lsize(*datap);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
enum zio_compress compress_type = arc_get_compression(*datap);
uint8_t complevel = arc_get_complevel(*datap);
if (arc_is_encrypted(*datap)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
*datap = arc_alloc_raw_buf(os->os_spa, db,
dmu_objset_id(os), byteorder, salt, iv, mac,
dn->dn_type, psize, lsize, compress_type,
complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type, complevel);
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
memcpy((*datap)->b_data, db->db.db_data, psize);
}
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
} else {
zio_nowait(dr->dr_zio);
}
}
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
* are processing the meta-dnode, and we have finished.
* The dbufs for all dnodes are put back on the list
* during processing, so that we can zio_wait()
* these IOs after initiating all child IOs.
*/
ASSERT3U(dr->dr_dbuf->db.db_object, ==,
DMU_META_DNODE_OBJECT);
break;
}
list_remove(list, dr);
if (dr->dr_dbuf == NULL) {
dbuf_sync_lightweight(dr, tx);
} else {
if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
VERIFY3U(dr->dr_dbuf->db_level, ==, level);
}
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
}
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
spa_t *spa = zio->io_spa;
int64_t delta;
uint64_t fill = 0;
int i;
ASSERT3P(db->db_blkptr, !=, NULL);
ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
zio->io_prev_space_delta = delta;
if (bp->blk_birth != 0) {
ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_type) ||
(db->db_blkid == DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_bonustype) ||
BP_IS_EMBEDDED(bp));
ASSERT(BP_GET_LEVEL(bp) == db->db_level);
}
mutex_enter(&db->db_mtx);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(bp)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
mutex_enter(&dn->dn_mtx);
if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
db->db_blkid != DMU_SPILL_BLKID) {
ASSERT0(db->db_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = db->db_blkid;
}
mutex_exit(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_DNODE) {
i = 0;
while (i < db->db.db_size) {
dnode_phys_t *dnp =
(void *)(((char *)db->db.db_data) + i);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE) {
fill++;
for (int j = 0; j < dnp->dn_nblkptr;
j++) {
(void) zfs_blkptr_verify(spa,
&dnp->dn_blkptr[j],
BLK_CONFIG_SKIP,
BLK_VERIFY_HALT);
}
if (dnp->dn_flags &
DNODE_FLAG_SPILL_BLKPTR) {
(void) zfs_blkptr_verify(spa,
DN_SPILL_BLKPTR(dnp),
BLK_CONFIG_SKIP,
BLK_VERIFY_HALT);
}
i += dnp->dn_extra_slots *
DNODE_MIN_SIZE;
}
}
} else {
if (BP_IS_HOLE(bp)) {
fill = 0;
} else {
fill = 1;
}
}
} else {
blkptr_t *ibp = db->db.db_data;
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
if (BP_IS_HOLE(ibp))
continue;
(void) zfs_blkptr_verify(spa, ibp,
BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
fill += BP_GET_FILL(ibp);
}
}
DB_DNODE_EXIT(db);
if (!BP_IS_EMBEDDED(bp))
BP_SET_FILL(bp, fill);
mutex_exit(&db->db_mtx);
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
*db->db_blkptr = *bp;
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block comprised of only
* holes, then we want this indirect to be compressed away to a hole. In
* order to do that we must zero out any information about the holes that
* this indirect points to before we try to compress it.
*/
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) zio, (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp;
unsigned int epbs, i;
ASSERT3U(db->db_level, >, 0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
/* Determine if all our children are holes */
for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
if (!BP_IS_HOLE(bp))
break;
}
/*
* If all the children are holes, then zero them all out so that
* we may get compressed away.
*/
if (i == 1ULL << epbs) {
/*
* We only found holes. Grab the rwlock to prevent
* anybody from reading the blocks we're about to
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
}
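/*
 * Worked example (illustrative, assuming dn_indblkshift = 17): the loop
 * above scans 1 << (17 - 7) = 1024 child blkptrs. If every one is a hole,
 * the whole 128K indirect is memset to zero, so the compression stage can
 * collapse it to a hole BP instead of writing out 128K of hole metadata.
 */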
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
objset_t *os = db->db_objset;
dmu_tx_t *tx = os->os_synctx;
ASSERT0(zio->io_error);
ASSERT(db->db_blkptr == bp);
/*
* For nopwrites and rewrites we ensure that the bp matches our
* original and bypass all the accounting.
*/
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
dbuf_dirty_record_t *dr = db->db_data_pending;
dnode_t *dn = dr->dr_dnode;
ASSERT(!list_link_active(&dr->dr_dirty_node));
ASSERT(dr->dr_dbuf == db);
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
if (db->db_state != DB_NOFILL) {
if (dr->dt.dl.dr_data != NULL &&
dr->dt.dl.dr_data != db->db_buf) {
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
}
} else {
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_blkid, <=,
dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
}
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
cv_broadcast(&db->db_changed);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
zio->io_txg);
kmem_free(dr, sizeof (dbuf_dirty_record_t));
}
static void
dbuf_write_nofill_ready(zio_t *zio)
{
dbuf_write_ready(zio, NULL, zio->io_private);
}
static void
dbuf_write_nofill_done(zio_t *zio)
{
dbuf_write_done(zio, NULL, zio->io_private);
}
static void
dbuf_write_override_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
dbuf_write_ready(zio, NULL, db);
}
static void
dbuf_write_override_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
mutex_enter(&db->db_mtx);
if (!BP_EQUAL(zio->io_bp, obp)) {
if (!BP_IS_HOLE(obp))
dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
arc_release(dr->dt.dl.dr_data, db);
}
mutex_exit(&db->db_mtx);
dbuf_write_done(zio, NULL, db);
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
typedef struct dbuf_remap_impl_callback_arg {
objset_t *drica_os;
uint64_t drica_blk_birth;
dmu_tx_t *drica_tx;
} dbuf_remap_impl_callback_arg_t;
static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg)
{
dbuf_remap_impl_callback_arg_t *drica = arg;
objset_t *os = drica->drica_os;
spa_t *spa = dmu_objset_spa(os);
dmu_tx_t *tx = drica->drica_tx;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (os == spa_meta_objset(spa)) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
size, drica->drica_blk_birth, tx);
}
}
static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
blkptr_t bp_copy = *bp;
spa_t *spa = dmu_objset_spa(dn->dn_objset);
dbuf_remap_impl_callback_arg_t drica;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
drica.drica_os = dn->dn_objset;
drica.drica_blk_birth = bp->blk_birth;
drica.drica_tx = tx;
if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
&drica)) {
/*
* If the blkptr being remapped is tracked by a livelist,
* then we need to make sure the livelist reflects the update.
* First, cancel out the old blkptr by appending a 'FREE'
* entry. Next, add an 'ALLOC' to track the new version. This
* way we avoid trying to free an inaccurate blkptr at delete.
* Note that embedded blkptrs are not tracked in livelists.
*/
if (dn->dn_objset != spa_meta_objset(spa)) {
dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg) {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees,
bp);
bplist_append(&ds->ds_dir->dd_pending_allocs,
&bp_copy);
}
}
/*
* The db_rwlock prevents dbuf_read_impl() from
* dereferencing the BP while we are changing it. To
* avoid lock contention, only grab it when we are actually
* changing the BP.
*/
if (rw != NULL)
rw_enter(rw, RW_WRITER);
*bp = bp_copy;
if (rw != NULL)
rw_exit(rw);
}
}
/*
* Remap any existing BP's to concrete vdevs, if possible.
*/
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(db->db_objset);
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
return;
if (db->db_level > 0) {
blkptr_t *bp = db->db.db_data;
for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
}
} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
dnode_phys_t *dnp = db->db.db_data;
ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
DMU_OT_DNODE);
for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
i += dnp[i].dn_extra_slots + 1) {
for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
&dn->dn_dbuf->db_rwlock);
dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
tx);
}
}
}
}
/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
dmu_buf_impl_t *parent = db->db_parent;
uint64_t txg = tx->tx_txg;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *pio; /* parent I/O */
int wp_flag = 0;
ASSERT(dmu_tx_is_syncing(tx));
os = dn->dn_objset;
if (db->db_state != DB_NOFILL) {
if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
/*
* Private object buffers are released here rather
* than in dbuf_dirty() since they are only modified
* in the syncing context and we don't want the
* overhead of making multiple copies of the data.
*/
if (BP_IS_HOLE(db->db_blkptr)) {
arc_buf_thaw(data);
} else {
dbuf_release_bp(db);
}
dbuf_remap(dn, db, tx);
}
}
if (parent != dn->dn_dbuf) {
/* Our parent is an indirect block. */
/* We have a dirty parent that has been scheduled for write. */
ASSERT(parent && parent->db_data_pending);
/* Our parent's buffer is one level closer to the dnode. */
ASSERT(db->db_level == parent->db_level-1);
/*
* We're about to modify our parent's db_data by modifying
* our block pointer, so the parent must be released.
*/
ASSERT(arc_released(parent->db_buf));
pio = parent->db_data_pending->dr_zio;
} else {
/* Our parent is the dnode itself. */
ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
db->db_blkid != DMU_SPILL_BLKID) ||
(db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
pio = dn->dn_zio;
}
ASSERT(db->db_level == 0 || data == db->db_buf);
ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
ASSERT(pio);
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
db->db.db_object, db->db_level, db->db_blkid);
if (db->db_blkid == DMU_SPILL_BLKID)
wp_flag = WP_SPILL;
wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
/*
* We copy the blkptr now (rather than when we instantiate the dirty
* record), because its value can change between open context and
* syncing context. We do not need to hold dn_struct_rwlock to read
* db_blkptr because we are in syncing context.
*/
dr->dr_bp_copy = *db->db_blkptr;
if (db->db_level == 0 &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* The BP for this block has been provided by open context
* (by dmu_sync() or dmu_buf_write_embedded()).
*/
abd_t *contents = (data != NULL) ?
abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
contents, db->db.db_size, db->db.db_size, &zp,
dbuf_write_override_ready, NULL,
dbuf_write_override_done,
dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
mutex_enter(&db->db_mtx);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
dr->dt.dl.dr_brtwrite);
mutex_exit(&db->db_mtx);
} else if (db->db_state == DB_NOFILL) {
ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
dr->dr_zio = zio_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
dbuf_write_nofill_ready, NULL,
dbuf_write_nofill_done, db,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
} else {
ASSERT(arc_released(data));
/*
* For indirect blocks, we want to set up the children
* ready callback so that we can properly handle an indirect
* block that only contains holes.
*/
arc_write_done_func_t *children_ready_cb = NULL;
if (db->db_level != 0)
children_ready_cb = dbuf_write_children_ready;
dr->dr_zio = arc_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
children_ready_cb, dbuf_write_done, db,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
}
}
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_clone);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
"Maximum size in bytes of the dbuf cache.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
"Maximum size in bytes of dbuf metadata cache.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
"Set size of dbuf cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
"Set size of dbuf metadata cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
"Set size of dbuf cache mutex array as log2 shift.");
diff --git a/sys/contrib/openzfs/module/zfs/dmu.c b/sys/contrib/openzfs/module/zfs/dmu.c
index dda869287c78..ddb29020b09b 100644
--- a/sys/contrib/openzfs/module/zfs/dmu.c
+++ b/sys/contrib/openzfs/module/zfs/dmu.c
@@ -1,2560 +1,2576 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif
/*
* Enable/disable nopwrite feature.
*/
static int zfs_nopwrite_enabled = 1;
/*
* Tunable to control percentage of dirtied L1 blocks from frees allowed into
* one TXG. After this threshold is crossed, additional dirty blocks from frees
* will wait until the next TXG.
* A value of zero will disable this throttle.
*/
static uint_t zfs_per_txg_dirty_frees_percent = 30;
/*
* Enable/disable forcing txg sync when dirty checking for holes with lseek().
* By default this is enabled to ensure accurate hole reporting, but it can
* result in a significant performance penalty for lseek(SEEK_HOLE) heavy
* workloads. Disabling this option will result in holes never being
* reported in dirty files, which is always safe.
*/
static int zfs_dmu_offset_next_sync = 1;
/*
* Limit the amount of data one call can prefetch to this value. This
* helps to limit the amount of memory that can be used by prefetching.
* Larger objects should be prefetched a bit at a time.
*/
+#ifdef _ILP32
+uint_t dmu_prefetch_max = 8 * 1024 * 1024;
+#else
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
+#endif
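/*
 * Illustrative sizing (assuming SPA_MAXBLOCKSIZE of 16M): the default cap
 * is 8 * 16M = 128M per dmu_prefetch() call on 64-bit kernels, reduced to
 * a flat 8M on 32-bit (_ILP32) kernels where address space is scarce.
 */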
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};
dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
{ byteswap_uint8_array, "uint8" },
{ byteswap_uint16_array, "uint16" },
{ byteswap_uint32_array, "uint32" },
{ byteswap_uint64_array, "uint64" },
{ zap_byteswap, "zap" },
{ dnode_buf_byteswap, "dnode" },
{ dmu_objset_byteswap, "objset" },
{ zfs_znode_byteswap, "znode" },
{ zfs_oldacl_byteswap, "oldacl" },
{ zfs_acl_byteswap, "acl" }
};
-static int
+int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
const void *tag, dmu_buf_t **dbp)
{
uint64_t blkid;
dmu_buf_impl_t *db;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (0);
}
+
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
const void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
uint64_t blkid;
dmu_buf_impl_t *db;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (err);
}
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
const void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
const void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_bonus_max(void)
{
return (DN_OLD_MAX_BONUSLEN);
}
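/*
 * Illustrative arithmetic (assuming the classic 512-byte dnode layout):
 * DN_OLD_MAX_BONUSLEN = 512 - 64 (dnode core) - 128 (one spill blkptr)
 * = 320 bytes, the largest bonus length legacy consumers may assume.
 */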
int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else if (newsize < 0 || newsize > db_fake->db_size) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonuslen(dn, newsize, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (!DMU_OT_IS_VALID(type)) {
error = SET_ERROR(EINVAL);
} else if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonus_type(dn, type, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
dmu_object_type_t type;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
type = dn->dn_bonustype;
DB_DNODE_EXIT(db);
return (type);
}
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
dbuf_rm_spill(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_rm_spill(dn, tx);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (error);
}
/*
* Look up and hold the bonus buffer for the provided dnode. If the dnode
* does not yet have a bonus dbuf, a new one will be allocated.
* Returns ENOENT, EIO, or 0.
*/
int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
uint32_t flags)
{
dmu_buf_impl_t *db;
int error;
uint32_t db_flags = DB_RF_MUST_SUCCEED;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus == NULL) {
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
if (dn->dn_bonus == NULL)
dbuf_create_bonus(dn);
}
db = dn->dn_bonus;
/* as long as the bonus buf is held, the dnode will be held */
if (zfs_refcount_add(&db->db_holds, tag) == 1) {
VERIFY(dnode_add_ref(dn, db));
atomic_inc_32(&dn->dn_dbufs_count);
}
/*
* Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
* hold and incrementing the dbuf count to ensure that dnode_move() sees
* a dnode hold for every dbuf.
*/
rw_exit(&dn->dn_struct_rwlock);
error = dbuf_read(db, NULL, db_flags);
if (error) {
dnode_evict_bonus(dn);
dbuf_rele(db, tag);
*dbp = NULL;
return (error);
}
*dbp = &db->db;
return (0);
}
int
dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
if (error)
return (error);
error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
dnode_rele(dn, FTAG);
return (error);
}
/*
* returns ENOENT, EIO, or 0.
*
* This interface will allocate a blank spill dbuf when a spill blk
* doesn't already exist on the dnode.
*
* If you only want to find an already-existing spill db, then
* dmu_spill_hold_existing() should be used.
*/
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = NULL;
int err;
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
err = dbuf_read(db, NULL, flags);
if (err == 0)
*dbp = &db->db;
else {
dbuf_rele(db, tag);
*dbp = NULL;
}
return (err);
}
int
dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
err = SET_ERROR(EINVAL);
} else {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (!dn->dn_have_spill) {
err = SET_ERROR(ENOENT);
} else {
err = dmu_spill_hold_by_dnode(dn,
DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
}
rw_exit(&dn->dn_struct_rwlock);
}
DB_DNODE_EXIT(db);
return (err);
}
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
uint32_t db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Note: longer-term, we should modify all of the dmu_buf_*() interfaces
* to take a held dnode rather than <os, object> -- the lookup is wasteful,
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
uint32_t flags)
{
dmu_buf_t **dbp;
zstream_t *zs = NULL;
uint64_t blkid, nblks, i;
uint32_t dbuf_flags;
int err;
zio_t *zio = NULL;
boolean_t missed = B_FALSE;
ASSERT(!read || length <= DMU_MAX_ACCESS);
/*
* Note: We directly notify the prefetch code of this read, so that
* we can tell it about the multi-block read. dbuf_read() only knows
* about the one block it is accessing.
*/
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
DB_RF_NOPREFETCH;
if ((flags & DMU_READ_NO_DECRYPT) != 0)
dbuf_flags |= DB_RF_NO_DECRYPT;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
} else {
if (offset + length > dn->dn_datablksz) {
zfs_panic_recover("zfs: accessing past end of object "
"%llx/%llx (size=%u access=%llu+%llu)",
(longlong_t)dn->dn_objset->
os_dsl_dataset->ds_object,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(EIO));
}
nblks = 1;
}
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
if (read)
zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
blkid = dbuf_whichblock(dn, 0, offset);
- if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
- length <= zfetch_array_rd_sz) {
+ if ((flags & DMU_READ_NO_PREFETCH) == 0) {
/*
* Prepare the zfetch before initiating the demand reads, so
* that if multiple threads block on same indirect block, we
* base predictions on the original less racy request order.
*/
zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
B_TRUE);
}
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
if (db == NULL) {
if (zs)
dmu_zfetch_run(zs, missed, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
if (read)
zio_nowait(zio);
return (SET_ERROR(EIO));
}
/*
* Initiate async demand data read.
* We check the db_state after calling dbuf_read() because
* (1) dbuf_read() may change the state to CACHED due to a
* hit in the ARC, and (2) on a cache miss, a child will
* have been added to "zio" but not yet completed, so the
* state will not yet be CACHED.
*/
if (read) {
if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
offset + length < db->db.db_offset +
db->db.db_size) {
if (offset <= db->db.db_offset)
dbuf_flags |= DB_RF_PARTIAL_FIRST;
else
dbuf_flags |= DB_RF_PARTIAL_MORE;
}
(void) dbuf_read(db, zio, dbuf_flags);
if (db->db_state != DB_CACHED)
missed = B_TRUE;
}
dbp[i] = &db->db;
}
if (!read)
zfs_racct_write(length, nblks);
if (zs)
dmu_zfetch_run(zs, missed, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
if (read) {
/* wait for async read i/o */
err = zio_wait(zio);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
/* wait for other io to complete */
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
}
}
*numbufsp = nblks;
*dbpp = dbp;
return (0);
}
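/*
 * Worked example for the nblks computation above (illustrative): with
 * dn_datablkshift = 17 (128K blocks), offset = 100K and length = 300K,
 *
 *	P2ROUNDUP(400K, 128K) = 512K,  P2ALIGN(100K, 128K) = 0,
 *	nblks = (512K - 0) >> 17 = 4,
 *
 * so four dbufs are held to cover the byte range [100K, 400K).
 */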
int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, int read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
uint64_t length, boolean_t read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
DB_DNODE_EXIT(db);
return (err);
}
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
int i;
dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
if (numbufs == 0)
return;
for (i = 0; i < numbufs; i++) {
if (dbp[i])
dbuf_rele(dbp[i], tag);
}
kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
/*
* Issue prefetch i/os for the given blocks. If level is greater than 0, the
* indirect blocks prefetched will be those that point to the blocks containing
* the data starting at offset, and continuing to offset + len.
*
* Note that if the indirect blocks above the blocks being prefetched are not
* in cache, they will be asynchronously read in.
*/
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, zio_priority_t pri)
{
dnode_t *dn;
uint64_t blkid;
int nblks, err;
if (len == 0) { /* they're interested in the bonus buffer */
dn = DMU_META_DNODE(os);
if (object == 0 || object >= DN_MAX_OBJECT)
return;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, level,
object * sizeof (dnode_phys_t));
dbuf_prefetch(dn, level, blkid, pri, 0);
rw_exit(&dn->dn_struct_rwlock);
return;
}
/*
* See comment before the definition of dmu_prefetch_max.
*/
len = MIN(len, dmu_prefetch_max);
/*
* XXX - Note, if the dnode for the requested object is not
* already cached, we will do a *synchronous* read in the
* dnode_hold() call. The same is true for any indirects.
*/
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return;
/*
* offset + len - 1 is the last byte we want to prefetch for, and offset
* is the first. Then dbuf_whichblock(dn, level, off + len - 1) is the
* last block we want to prefetch, and dbuf_whichblock(dn, level,
* offset) is the first. Then the number we need to prefetch is the
* last - first + 1.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (level > 0 || dn->dn_datablkshift != 0) {
nblks = dbuf_whichblock(dn, level, offset + len - 1) -
dbuf_whichblock(dn, level, offset) + 1;
} else {
nblks = (offset < dn->dn_datablksz);
}
if (nblks != 0) {
blkid = dbuf_whichblock(dn, level, offset);
for (int i = 0; i < nblks; i++)
dbuf_prefetch(dn, level, blkid + i, pri, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
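/*
 * Worked example (illustrative): prefetching level-0 blocks for
 * offset = 1M, len = 256K on a dnode with 128K records gives
 *
 *	first = dbuf_whichblock(dn, 0, 1M) = 8
 *	last  = dbuf_whichblock(dn, 0, 1M + 256K - 1) = 9
 *	nblks = last - first + 1 = 2
 *
 * so dbuf_prefetch() is issued for blkids 8 and 9.
 */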
/*
* Get the next "chunk" of file data to free. We traverse the file from
* the end so that the file gets shorter over time (if we crash in the
* middle, this will leave us in a better state). We find allocated file
* data by simply searching the allocated level 1 indirects.
*
* On input, *start should be the first offset that does not need to be
* freed (e.g. "offset + length"). On return, *start will be the first
* offset that should be freed and l1blks is set to the number of level 1
* indirect blocks found within the chunk.
*/
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
uint64_t blks;
uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
/* bytes of data covered by a level-1 indirect block */
uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
ASSERT3U(minimum, <=, *start);
/*
* Check if we can free the entire range assuming that all of the
* L1 blocks in this range have data. If we can, we use this
* worst case value as an estimate so we can avoid having to look
* at the object's actual data.
*/
uint64_t total_l1blks =
(roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
iblkrange;
if (total_l1blks <= maxblks) {
*l1blks = total_l1blks;
*start = minimum;
return (0);
}
ASSERT(ISP2(iblkrange));
for (blks = 0; *start > minimum && blks < maxblks; blks++) {
int err;
/*
* dnode_next_offset(BACKWARDS) will find an allocated L1
* indirect block at or before the input offset. We must
* decrement *start so that it is at the end of the region
* to search.
*/
(*start)--;
err = dnode_next_offset(dn,
DNODE_FIND_BACKWARDS, start, 2, 1, 0);
/* if there are no indirect blocks before start, we are done */
if (err == ESRCH) {
*start = minimum;
break;
} else if (err != 0) {
*l1blks = blks;
return (err);
}
/* set start to the beginning of this L1 indirect */
*start = P2ALIGN(*start, iblkrange);
}
if (*start < minimum)
*start = minimum;
*l1blks = blks;
return (0);
}
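/*
 * Illustrative scale (a sketch, assuming 128K data blocks and 128K
 * indirects): EPB(17, 7) = 1024 blkptrs per L1, so iblkrange =
 * 128K * 1024 = 128M of file data per level-1 indirect. A free chunk is
 * therefore bounded by maxblks such L1 regions, keeping each free
 * transaction's dirty footprint predictable.
 */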
/*
* If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
* flag is set; otherwise return false.
* Used below in dmu_free_long_range_impl() to enable abort when unmounting.
*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS)
return (zfs_get_vfs_flag_unmounted(os));
#else
(void) os;
#endif
return (B_FALSE);
}
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
uint64_t length)
{
uint64_t object_size;
int err;
uint64_t dirty_frees_threshold;
dsl_pool_t *dp = dmu_objset_pool(os);
if (dn == NULL)
return (SET_ERROR(EINVAL));
object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
if (offset >= object_size)
return (0);
if (zfs_per_txg_dirty_frees_percent <= 100)
dirty_frees_threshold =
zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
else
dirty_frees_threshold = zfs_dirty_data_max / 20;
if (length == DMU_OBJECT_END || offset + length > object_size)
length = object_size - offset;
while (length != 0) {
uint64_t chunk_end, chunk_begin, chunk_len;
uint64_t l1blks;
dmu_tx_t *tx;
if (dmu_objset_zfs_unmounting(dn->dn_objset))
return (SET_ERROR(EINTR));
chunk_end = chunk_begin = offset + length;
/* move chunk_begin backwards to the beginning of this chunk */
err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
if (err)
return (err);
ASSERT3U(chunk_begin, >=, offset);
ASSERT3U(chunk_begin, <=, chunk_end);
chunk_len = chunk_end - chunk_begin;
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
/*
* Mark this transaction as typically resulting in a net
* reduction in space used.
*/
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
uint64_t txg = dmu_tx_get_txg(tx);
mutex_enter(&dp->dp_lock);
uint64_t long_free_dirty =
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
mutex_exit(&dp->dp_lock);
/*
* To avoid filling up a TXG with just frees, wait for
* the next TXG to open before freeing more chunks if
* we have reached the threshold of frees.
*/
if (dirty_frees_threshold != 0 &&
long_free_dirty >= dirty_frees_threshold) {
DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
dmu_tx_commit(tx);
txg_wait_open(dp, 0, B_TRUE);
continue;
}
/*
* In order to prevent unnecessary write throttling, for each
* TXG, we track the cumulative size of L1 blocks being dirtied
* in dnode_free_range() below. We compare this number to a
* tunable threshold, past which we prevent new L1 dirty freeing
* blocks from being added into the open TXG. See
* dmu_free_long_range_impl() for details. The threshold
* prevents write throttle activation due to dirty freeing L1
* blocks taking up a large percentage of zfs_dirty_data_max.
*/
mutex_enter(&dp->dp_lock);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
l1blks << dn->dn_indblkshift;
mutex_exit(&dp->dp_lock);
DTRACE_PROBE3(free__long__range,
uint64_t, long_free_dirty, uint64_t, chunk_len,
uint64_t, txg);
dnode_free_range(dn, chunk_begin, chunk_len, tx);
dmu_tx_commit(tx);
length -= chunk_len;
}
return (0);
}
int
dmu_free_long_range(objset_t *os, uint64_t object,
uint64_t offset, uint64_t length)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_free_long_range_impl(os, dn, offset, length);
/*
* It is important to zero out the maxblkid when freeing the entire
* file, so that (a) subsequent calls to dmu_free_long_range_impl()
* will take the fast path, and (b) dnode_reallocate() can verify
* that the entire file has been freed.
*/
if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
dn->dn_maxblkid = 0;
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_free_long_object(objset_t *os, uint64_t object)
{
dmu_tx_t *tx;
int err;
err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
if (err != 0)
return (err);
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, object);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err == 0) {
err = dmu_object_free(os, object, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
return (err);
}
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
ASSERT(offset < UINT64_MAX);
ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
dnode_free_range(dn, offset, size, tx);
dnode_rele(dn, FTAG);
return (0);
}
static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dmu_buf_t **dbp;
int numbufs, err = 0;
/*
* Deal with odd block sizes, where there can't be data past the first
* block. If we ever do the tail block optimization, we will need to
* handle that here as well.
*/
if (dn->dn_maxblkid == 0) {
uint64_t newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
memset((char *)buf + newsz, 0, size - newsz);
size = newsz;
}
while (size > 0) {
uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
int i;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
TRUE, FTAG, &numbufs, &dbp, flags);
if (err)
break;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
return (err);
}
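/*
 * Copy-loop example (illustrative): reading 300K at offset 100K from a
 * file with 128K blocks holds blocks 0-3. For the first dbuf,
 * bufoff = 100K - 0 = 100K and tocpy = MIN(128K - 100K, 300K) = 28K;
 * the middle dbufs each copy a full 128K, and the last copies the
 * remaining 16K.
 */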
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_read_impl(dn, offset, size, buf, flags);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags)
{
return (dmu_read_impl(dn, offset, size, buf, flags));
}
static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
int i;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
}
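/*
 * Note on the will_fill/will_dirty split above (illustrative): when a
 * copy covers an entire block (tocpy == db_size), dmu_buf_will_fill()
 * lets us skip reading the old contents, since every byte is about to
 * be overwritten; a partial copy must use dmu_buf_will_dirty(), which
 * may read the block in so the untouched bytes survive.
 */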
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
/*
* Note: Lustre is an external consumer of this interface.
*/
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs, i;
if (size == 0)
return;
VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
for (i = 0; i < numbufs; i++) {
dmu_buf_t *db = dbp[i];
dmu_buf_will_not_fill(db, tx);
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx)
{
dmu_buf_t *db;
ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
VERIFY0(dmu_buf_hold_noread(os, object, offset,
FTAG, &db));
dmu_buf_write_embedded(db,
data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
uncompressed_size, compressed_size, byteorder, tx);
dmu_buf_rele(db, FTAG);
}
void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
int numbufs, i;
dmu_buf_t **dbp;
VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
&numbufs, &dbp));
for (i = 0; i < numbufs; i++)
dmu_buf_redact(dbp[i], tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
dmu_buf_t **dbp;
int numbufs, i, err;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
TRUE, FTAG, &numbufs, &dbp, 0);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
UIO_READ, uio);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Read 'size' bytes into the uio buffer, from object zdb->db_object,
* starting at zfs_uio_offset(uio).
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_read_uio_dnode(dn, uio, size);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Read 'size' bytes into the uio buffer, from the specified object,
* starting at offset zfs_uio_offset(uio).
*/
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_read_uio_dnode(dn, uio, size);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
int err = 0;
int i;
err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
/*
* XXX zfs_uiomove could block forever (e.g. nfs-backed
* pages). There needs to be a uiolockdown() function
* to lock the pages in memory, so that zfs_uiomove won't
* block.
*/
err = zfs_uio_fault_move((char *)db->db_data + bufoff,
tocpy, UIO_WRITE, uio);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Write 'size' bytes from the uio buffer, to object zdb->db_object,
* starting at offset zfs_uio_offset(uio).
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_write_uio_dnode(dn, uio, size, tx);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Write 'size' bytes from the uio buffer, to the specified object,
* starting at offset zfs_uio_offset(uio).
*/
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_write_uio_dnode(dn, uio, size, tx);
dnode_rele(dn, FTAG);
return (err);
}
#endif /* _KERNEL */
/*
* Allocate a loaned anonymous arc buffer.
*/
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}
/*
* Free a loaned arc buffer.
*/
void
dmu_return_arcbuf(arc_buf_t *buf)
{
arc_return_buf(buf, FTAG);
arc_buf_destroy(buf, FTAG);
}
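/*
 * Example usage (a sketch of the loaned-buffer path, similar in spirit
 * to the full-block writes in zfs_write()): the caller fills the loaned
 * buf and hands it off; on a copy-in failure it must return the loan
 * itself. "copyin_data" is a hypothetical helper.
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db, blksz);
 *	if (copyin_data(abuf->b_data, blksz) != 0) {
 *		dmu_return_arcbuf(abuf);
 *		return (SET_ERROR(EFAULT));
 *	}
 *	err = dmu_assign_arcbuf_by_dbuf(db, woff, abuf, tx);
 */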
/*
* A "lightweight" write is faster than a regular write (e.g.
* dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
* CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]t. However, the
* data cannot be read or overwritten until the transaction's txg has been
* synced. This makes it appropriate for workloads that are known to be
* (temporarily) write-only, like "zfs receive".
*
* A single block is written, starting at the specified offset in bytes. If
* the call is successful, it returns 0 and the provided abd has been
* consumed (the caller should not free it).
*/
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr =
dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
if (dr == NULL)
return (SET_ERROR(EIO));
dr->dt.dll.dr_abd = abd;
dr->dt.dll.dr_props = *zp;
dr->dt.dll.dr_flags = flags;
return (0);
}
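/*
 * Example usage (a sketch of a receive-style caller; compare the
 * batched writes in dmu_recv.c). "os", "dn", "off", "abd" and "tx" are
 * assumed caller state; per the comment above, the abd is consumed
 * only on success.
 *
 *	zio_prop_t zp;
 *	dmu_write_policy(os, dn, 0, 0, &zp);
 *	err = dmu_lightweight_write_by_dnode(dn, off, abd, &zp, 0, tx);
 *	if (err != 0)
 *		abd_free(abd);
 */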
/*
* When possible, directly assign the passed loaned arc buffer to a dbuf.
* If this is not possible, copy the contents of the passed arc buf via
* dmu_write().
*/
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
objset_t *os = dn->dn_objset;
uint64_t object = dn->dn_object;
uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
uint64_t blkid;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL)
return (SET_ERROR(EIO));
/*
* We can only assign if the offset is aligned and the arc buf is the
* same size as the dbuf.
*/
if (offset == db->db.db_offset && blksz == db->db.db_size) {
zfs_racct_write(blksz, 1);
dbuf_assign_arcbuf(db, buf, tx);
dbuf_rele(db, FTAG);
} else {
/* compressed bufs must always be assignable to their dbuf */
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
dmu_return_arcbuf(buf);
}
return (0);
}
int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
int err;
dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
DB_DNODE_ENTER(dbuf);
err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
DB_DNODE_EXIT(dbuf);
return (err);
}
typedef struct {
dbuf_dirty_record_t *dsa_dr;
dmu_sync_cb_t *dsa_done;
zgd_t *dsa_zgd;
dmu_tx_t *dsa_tx;
} dmu_sync_arg_t;
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
(void) buf;
dmu_sync_arg_t *dsa = varg;
dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
blkptr_t *bp = zio->io_bp;
if (zio->io_error == 0) {
if (BP_IS_HOLE(bp)) {
/*
* A block of zeros may compress to a hole, but the
* block size still needs to be known for replay.
*/
BP_SET_LSIZE(bp, db->db_size);
} else if (!BP_IS_EMBEDDED(bp)) {
ASSERT(BP_GET_LEVEL(bp) == 0);
BP_SET_FILL(bp, 1);
}
}
}
static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
dmu_sync_ready(zio, NULL, zio->io_private);
}
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
(void) buf;
dmu_sync_arg_t *dsa = varg;
dbuf_dirty_record_t *dr = dsa->dsa_dr;
dmu_buf_impl_t *db = dr->dr_dbuf;
zgd_t *zgd = dsa->dsa_zgd;
/*
* Record the vdev(s) backing this blkptr so they can be flushed after
* the writes for the lwb have completed.
*/
if (zio->io_error == 0) {
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
}
mutex_enter(&db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
if (zio->io_error == 0) {
dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
if (dr->dt.dl.dr_nopwrite) {
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
ASSERT(BP_EQUAL(bp, bp_orig));
VERIFY(BP_EQUAL(bp, db->db_blkptr));
ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
VERIFY(zio_checksum_table[chksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
}
dr->dt.dl.dr_overridden_by = *zio->io_bp;
dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
/*
* Old-style holes are filled with all zeros, whereas
* new-style holes maintain their lsize, type, level,
* and birth time (see zio_write_compress). While we
* need to reset the BP_SET_LSIZE() call that happened
* in dmu_sync_ready for old-style holes, we do *not*
* want to wipe out the information contained in
* new-style holes. Thus, only zero out the block
* pointer if it's an old-style hole.
*/
if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
dr->dt.dl.dr_overridden_by.blk_birth == 0)
BP_ZERO(&dr->dt.dl.dr_overridden_by);
} else {
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
}
cv_broadcast(&db->db_changed);
mutex_exit(&db->db_mtx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
kmem_free(dsa, sizeof (*dsa));
}
static void
dmu_sync_late_arrival_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
dmu_sync_arg_t *dsa = zio->io_private;
zgd_t *zgd = dsa->dsa_zgd;
if (zio->io_error == 0) {
/*
* Record the vdev(s) backing this blkptr so they can be
* flushed after the writes for the lwb have completed.
*/
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
if (!BP_IS_HOLE(bp)) {
blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
ASSERT(zio->io_bp->blk_birth == zio->io_txg);
ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
}
}
dmu_tx_commit(dsa->dsa_tx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
abd_free(zio->io_abd);
kmem_free(dsa, sizeof (*dsa));
}
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
zio_prop_t *zp, zbookmark_phys_t *zb)
{
dmu_sync_arg_t *dsa;
dmu_tx_t *tx;
+ int error;
+
+ error = dbuf_read((dmu_buf_impl_t *)zgd->zgd_db, NULL,
+ DB_RF_CANFAIL | DB_RF_NOPREFETCH);
+ if (error != 0)
+ return (error);
tx = dmu_tx_create(os);
dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
- if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
+ /*
+ * This transaction does not produce any dirty data or log blocks, so
+ * it should not be throttled. All other cases wait for TXG sync, by
+ * which time the log block we are writing will be obsolete, so we can
+ * skip waiting and just return error here instead.
+ */
+ if (dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE) != 0) {
dmu_tx_abort(tx);
/* Make zl_get_data do txg_wait_synced() */
return (SET_ERROR(EIO));
}
/*
* In order to prevent the zgd's lwb from being free'd prior to
* dmu_sync_late_arrival_done() being called, we have to ensure
* the lwb's "max txg" takes this tx's txg into account.
*/
zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = NULL;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = tx;
/*
* Since we are currently syncing this txg, it's nontrivial to
* determine what BP to nopwrite against, so we disable nopwrite.
*
* When syncing, the db_blkptr is initially the BP of the previous
* txg. We cannot nopwrite against it because it will be changed
* (this is similar to the non-late-arrival case where the dbuf is
* dirty in a future txg).
*
* Then dbuf_write_ready() sets db_blkptr to the location we will write.
* We cannot nopwrite against it because although the BP will not
* (typically) be changed, the data has not yet been persisted to this
* location.
*
* Finally, when dbuf_write_done() is called, it is theoretically
* possible to always nopwrite, because the data that was written in
* this txg is the same data that we are trying to write. However we
* would need to check that this dbuf is not dirty in any future
* txg's (as we do in the normal dmu_sync() path). For simplicity, we
* don't nopwrite in this case.
*/
zp->zp_nopwrite = B_FALSE;
zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done,
dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
return (0);
}
/*
* Intent log support: sync the block associated with db to disk.
* N.B. and XXX: the caller is responsible for making sure that the
* data isn't changing while dmu_sync() is writing it.
*
* Return values:
*
* EEXIST: this txg has already been synced, so there's nothing to do.
* The caller should not log the write.
*
* ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
* The caller should not log the write.
*
* EALREADY: this block is already in the process of being synced.
* The caller should track its progress (somehow).
*
* EIO: could not do the I/O.
* The caller should do a txg_wait_synced().
*
* 0: the I/O has been initiated.
* The caller should log this blkptr in the done callback.
* It is possible that the I/O will fail, in which case
* the error will be reported to the done callback and
* propagated to pio from zio_done().
*/
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
objset_t *os = db->db_objset;
dsl_dataset_t *ds = os->os_dsl_dataset;
dbuf_dirty_record_t *dr, *dr_next;
dmu_sync_arg_t *dsa;
zbookmark_phys_t zb;
zio_prop_t zp;
dnode_t *dn;
ASSERT(pio != NULL);
ASSERT(txg != 0);
SET_BOOKMARK(&zb, ds->ds_object,
db->db.db_object, db->db_level, db->db_blkid);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
DB_DNODE_EXIT(db);
/*
* If we're frozen (running ziltest), we always need to generate a bp.
*/
if (txg > spa_freeze_txg(os->os_spa))
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
/*
* Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
* and us. If we determine that this txg is not yet syncing,
* but it begins to sync a moment later, that's OK because the
* sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
*/
mutex_enter(&db->db_mtx);
if (txg <= spa_last_synced_txg(os->os_spa)) {
/*
* This txg has already synced. There's nothing to do.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EEXIST));
}
if (txg <= spa_syncing_txg(os->os_spa)) {
/*
* This txg is currently syncing, so we can't mess with
* the dirty record anymore; just write a new log block.
*/
mutex_exit(&db->db_mtx);
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
}
dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL) {
/*
* There's no dr for this dbuf, so it must have been freed.
* There's no need to log writes to freed blocks, so we're done.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
dr_next = list_next(&db->db_dirty_records, dr);
ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
if (db->db_blkptr != NULL) {
/*
* We need to fill in zgd_bp with the current blkptr so that
* the nopwrite code can check if we're writing the same
* data that's already on disk. We can only nopwrite if we
* are sure that after making the copy, db_blkptr will not
* change until our i/o completes. We ensure this by
* holding the db_mtx, and only allowing nopwrite if the
* block is not already dirty (see below). This is verified
* by dmu_sync_done(), which VERIFYs that the db_blkptr has
* not changed.
*/
*zgd->zgd_bp = *db->db_blkptr;
}
/*
* Assume the on-disk data is X, the current syncing data (in
* txg - 1) is Y, and the current in-memory data is Z (currently
* in dmu_sync).
*
* We usually want to perform a nopwrite if X and Z are the
* same. However, if Y is different (i.e. the BP is going to
* change before this write takes effect), then a nopwrite will
* be incorrect - we would override with X, which could have
* been freed when Y was written.
*
* (Note that this is not a concern when we are nop-writing from
* syncing context, because X and Y must be identical, as
* all previous txgs have been synced.)
*
* Therefore, we disable nopwrite if the current BP could change
* before this TXG. There are two ways it could change: by
* being dirty (dr_next is non-NULL), or by being freed
* (dnode_block_freed()). This behavior is verified by
* zio_done(), which VERIFYs that the override BP is identical
* to the on-disk BP.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
zp.zp_nopwrite = B_FALSE;
DB_DNODE_EXIT(db);
ASSERT(dr->dr_txg == txg);
if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* We have already issued a sync write for this buffer,
* or this buffer has already been synced. It could not
* have been dirtied since, or we would have cleared the state.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EALREADY));
}
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
mutex_exit(&db->db_mtx);
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = dr;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = NULL;
zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp,
dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db), dbuf_is_l2cacheable(db),
&zp, dmu_sync_ready, NULL, dmu_sync_done, dsa,
ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
return (0);
}
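/*
 * Sketch of how a zl_get_data-style caller maps the return values
 * documented above (compare zfs_get_data()):
 *
 *	error = dmu_sync(zio, txg, done_cb, zgd);
 *	0		- log the blkptr from done_cb once the I/O completes
 *	EEXIST/ENOENT	- drop the log record; nothing needs to be replayed
 *	EALREADY	- a sync of this block is already in flight
 *	EIO		- fall back to txg_wait_synced()
 */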
int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_nlevels(dn, nlevels, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_blksz(dn, size, ibs, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (0);
}
void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's checksum function. This
* check ensures that the receiving system can understand the
* checksum function transmitted.
*/
ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
dn->dn_checksum = checksum;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's compression function. This
* check ensures that the receiving system can understand the
* compression function transmitted.
*/
ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
dn->dn_compress = compress;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
/*
* When the "redundant_metadata" property is set to "most", only indirect
* blocks of this level and higher will have an additional ditto block.
*/
static const int zfs_redundant_metadata_most_ditto_level = 2;
void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
(wp & WP_SPILL));
enum zio_checksum checksum = os->os_checksum;
enum zio_compress compress = os->os_compress;
uint8_t complevel = os->os_complevel;
enum zio_checksum dedup_checksum = os->os_dedup_checksum;
boolean_t dedup = B_FALSE;
boolean_t nopwrite = B_FALSE;
boolean_t dedup_verify = os->os_dedup_verify;
boolean_t encrypt = B_FALSE;
int copies = os->os_copies;
/*
* We maintain different write policies for each of the following
* types of data:
* 1. metadata
* 2. preallocated blocks (i.e. level-0 blocks of a dump device)
* 3. all other level 0 blocks
*/
if (ismd) {
/*
* XXX -- we should design a compression algorithm
* that specializes in arrays of bps.
*/
compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
/*
* Metadata always gets checksummed. If the data
* checksum is multi-bit correctable, and it's not a
* ZBT-style checksum, then it's suitable for metadata
* as well. Otherwise, the metadata checksum defaults
* to fletcher4.
*/
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_METADATA) ||
(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED))
checksum = ZIO_CHECKSUM_FLETCHER_4;
switch (os->os_redundant_metadata) {
case ZFS_REDUNDANT_METADATA_ALL:
copies++;
break;
case ZFS_REDUNDANT_METADATA_MOST:
if (level >= zfs_redundant_metadata_most_ditto_level ||
DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))
copies++;
break;
case ZFS_REDUNDANT_METADATA_SOME:
if (DMU_OT_IS_CRITICAL(type))
copies++;
break;
case ZFS_REDUNDANT_METADATA_NONE:
break;
}
} else if (wp & WP_NOFILL) {
ASSERT(level == 0);
/*
* If we're writing preallocated blocks, we aren't actually
* writing them, so don't set any policy properties. These
* blocks are currently only used by an external subsystem
* outside of zfs (i.e. dump) and not written by the zio
* pipeline.
*/
compress = ZIO_COMPRESS_OFF;
checksum = ZIO_CHECKSUM_OFF;
} else {
compress = zio_compress_select(os->os_spa, dn->dn_compress,
compress);
complevel = zio_complevel_select(os->os_spa, compress,
complevel, complevel);
checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
zio_checksum_select(dn->dn_checksum, checksum) :
dedup_checksum;
/*
* Determine dedup setting. If we are in dmu_sync(),
* we won't actually dedup now because that's all
* done in syncing context; but we do want to use the
* dedup checksum. If the checksum is not strong
* enough to ensure unique signatures, force
* dedup_verify.
*/
if (dedup_checksum != ZIO_CHECKSUM_OFF) {
dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP))
dedup_verify = B_TRUE;
}
/*
* Enable nopwrite if we have a secure enough checksum
* algorithm (see comment in zio_nop_write) and
* compression is enabled. We don't enable nopwrite if
* dedup is enabled as the two features are mutually
* exclusive.
*/
nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) &&
compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
}
/*
* All objects in an encrypted objset are protected from modification
* via a MAC. Encrypted objects store their IV and salt in the last DVA
* in the bp, so we cannot use all copies. Encrypted objects are also
* not subject to nopwrite since writing the same data will still
* result in a new ciphertext. Only blocks of encrypted object types
* can be dedup'd, to avoid ambiguity in the dedup code, since the DDT
* does not store object types.
*/
if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
encrypt = B_TRUE;
if (DMU_OT_IS_ENCRYPTED(type)) {
copies = MIN(copies, SPA_DVAS_PER_BP - 1);
nopwrite = B_FALSE;
} else {
dedup = B_FALSE;
}
if (level <= 0 &&
(type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
compress = ZIO_COMPRESS_EMPTY;
}
}
zp->zp_compress = compress;
zp->zp_complevel = complevel;
zp->zp_checksum = checksum;
zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
zp->zp_level = level;
zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
zp->zp_dedup = dedup;
zp->zp_dedup_verify = dedup && dedup_verify;
zp->zp_nopwrite = nopwrite;
zp->zp_encrypt = encrypt;
zp->zp_byteorder = ZFS_HOST_BYTEORDER;
memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
os->os_zpl_special_smallblock : 0;
ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
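/*
 * Worked example for the policy above: with copies=1 and
 * redundant_metadata=most, a level-2 indirect block (level >=
 * zfs_redundant_metadata_most_ditto_level) gets zp_copies = 2, while a
 * level-1 indirect block of a non-metadata object type stays at 1
 * unless WP_SPILL is set. With redundant_metadata=all, every metadata
 * block gets the extra copy.
 */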
/*
* Reports the location of data and holes in an object. In order to
* accurately report holes, all dirty data must be synced to disk. This
* causes extremely poor performance when seeking for holes in a dirty file.
* As a compromise, only provide hole data when the dnode is clean. When
* a dnode is dirty, report the dnode as having no holes by returning EBUSY,
* which is always safe to do.
*/
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
dnode_t *dn;
int restarted = 0, err;
restart:
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dnode_is_dirty(dn)) {
/*
* If the zfs_dmu_offset_next_sync module option is enabled
* then hole reporting has been requested. Dirty dnodes
* must be synced to disk to accurately report holes.
*
* Provided a RL_READER rangelock spanning 0-UINT64_MAX is
* held by the caller, only a single restart will be required.
* We tolerate callers that do not hold the rangelock by
* returning EBUSY and not reporting holes after one restart.
*/
if (zfs_dmu_offset_next_sync) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
if (restarted)
return (SET_ERROR(EBUSY));
txg_wait_synced(dmu_objset_pool(os), 0);
restarted = 1;
goto restart;
}
err = SET_ERROR(EBUSY);
} else {
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
(hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (err);
}
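/*
 * Example usage (a sketch of a SEEK_HOLE-style consumer; compare
 * zfs_holey()): "off" starts at the search offset and is updated in
 * place on success.
 *
 *	uint64_t off = start;
 *	error = dmu_offset_next(os, obj, B_TRUE, &off);
 *	if (error == EBUSY)
 *		...	dnode was dirty; treat the object as hole-free
 *	else if (error == ESRCH)
 *		...	no hole was found past "off"
 */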
int
dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
blkptr_t *bps, size_t *nbpsp)
{
dmu_buf_t **dbp, *dbuf;
dmu_buf_impl_t *db;
blkptr_t *bp;
int error, numbufs;
error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
&numbufs, &dbp);
if (error != 0) {
if (error == ESRCH) {
error = SET_ERROR(ENXIO);
}
return (error);
}
ASSERT3U(numbufs, <=, *nbpsp);
for (int i = 0; i < numbufs; i++) {
dbuf = dbp[i];
db = (dmu_buf_impl_t *)dbuf;
mutex_enter(&db->db_mtx);
if (!list_is_empty(&db->db_dirty_records)) {
dbuf_dirty_record_t *dr;
dr = list_head(&db->db_dirty_records);
if (dr->dt.dl.dr_brtwrite) {
/*
* This is a very special case: we clone a
* block and, in the same transaction group,
* read its BP (most likely to clone the clone).
*/
bp = &dr->dt.dl.dr_overridden_by;
} else {
/*
* The block was modified in the same
* transaction group.
*/
mutex_exit(&db->db_mtx);
error = SET_ERROR(EAGAIN);
goto out;
}
} else {
bp = db->db_blkptr;
}
mutex_exit(&db->db_mtx);
if (bp == NULL) {
/*
* The block was created in this transaction group,
* so it has no BP yet.
*/
error = SET_ERROR(EAGAIN);
goto out;
}
/*
* Make sure we clone only data blocks.
*/
if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) {
error = SET_ERROR(EINVAL);
goto out;
}
bps[i] = *bp;
}
*nbpsp = numbufs;
out:
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (error);
}
int
dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
dmu_tx_t *tx, const blkptr_t *bps, size_t nbps, boolean_t replay)
{
spa_t *spa;
dmu_buf_t **dbp, *dbuf;
dmu_buf_impl_t *db;
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
const blkptr_t *bp;
int error = 0, i, numbufs;
spa = os->os_spa;
VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
&numbufs, &dbp));
ASSERT3U(nbps, ==, numbufs);
/*
* Before we start cloning, make sure that the dbuf sizes match the new
* BPs' sizes. If they don't, that's a no-go, as we are not able to shrink
* dbufs.
*/
for (i = 0; i < numbufs; i++) {
dbuf = dbp[i];
db = (dmu_buf_impl_t *)dbuf;
bp = &bps[i];
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_blkid != DMU_SPILL_BLKID);
if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) {
error = SET_ERROR(EXDEV);
goto out;
}
}
for (i = 0; i < numbufs; i++) {
dbuf = dbp[i];
db = (dmu_buf_impl_t *)dbuf;
bp = &bps[i];
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_blkid != DMU_SPILL_BLKID);
ASSERT(BP_IS_HOLE(bp) || dbuf->db_size == BP_GET_LSIZE(bp));
dmu_buf_will_clone(dbuf, tx);
mutex_enter(&db->db_mtx);
dr = list_head(&db->db_dirty_records);
VERIFY(dr != NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
dl->dr_overridden_by = *bp;
dl->dr_brtwrite = B_TRUE;
dl->dr_override_state = DR_OVERRIDDEN;
if (BP_IS_HOLE(bp)) {
dl->dr_overridden_by.blk_birth = 0;
dl->dr_overridden_by.blk_phys_birth = 0;
} else {
dl->dr_overridden_by.blk_birth = dr->dr_txg;
if (!BP_IS_EMBEDDED(bp)) {
dl->dr_overridden_by.blk_phys_birth =
BP_PHYSICAL_BIRTH(bp);
}
}
mutex_exit(&db->db_mtx);
/*
* When data is embedded into the BP, there is no need to create
* a BRT entry, as there is no data block. Just copy the BP, as
* it contains the data.
* Also, when replaying the ZIL, we don't want to bump references
* in the BRT, as that was already done during ZIL claim.
*/
if (!replay && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
brt_pending_add(spa, bp, tx);
}
}
out:
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (error);
}
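/*
 * Example usage (a sketch of the two-phase block-clone flow; compare
 * zfs_clone_range()): first capture the source BPs, then install them
 * in the destination under an assigned tx. All names are assumed
 * caller state.
 *
 *	size_t nbps = maxblocks;
 *	error = dmu_read_l0_bps(inos, inobj, off, len, bps, &nbps);
 *	if (error == 0)
 *		error = dmu_brt_clone(outos, outobj, off, len, tx,
 *		    bps, nbps, B_FALSE);
 */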
void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
dnode_phys_t *dnp = dn->dn_phys;
doi->doi_data_block_size = dn->dn_datablksz;
doi->doi_metadata_block_size = dn->dn_indblkshift ?
1ULL << dn->dn_indblkshift : 0;
doi->doi_type = dn->dn_type;
doi->doi_bonus_type = dn->dn_bonustype;
doi->doi_bonus_size = dn->dn_bonuslen;
doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
doi->doi_indirection = dn->dn_nlevels;
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
doi->doi_nblkptr = dn->dn_nblkptr;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
for (int i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}
void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
mutex_enter(&dn->dn_mtx);
__dmu_object_info_from_dnode(dn, doi);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
}
/*
* Get information on a DMU object.
* If doi is NULL, just indicates whether the object exists.
*/
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
if (doi != NULL)
dmu_object_info_from_dnode(dn, doi);
dnode_rele(dn, FTAG);
return (0);
}
/*
* As above, but faster; can be used when you have a held dbuf in hand.
*/
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
DB_DNODE_ENTER(db);
dmu_object_info_from_dnode(DB_DNODE(db), doi);
DB_DNODE_EXIT(db);
}
/*
* Faster still when you only care about the size.
*/
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
u_longlong_t *nblk512)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*blksize = dn->dn_datablksz;
/* add in number of slots used for the dnode itself */
*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
DB_DNODE_EXIT(db);
}
void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*dnsize = dn->dn_num_slots << DNODE_SHIFT;
DB_DNODE_EXIT(db);
}
void
byteswap_uint64_array(void *vbuf, size_t size)
{
uint64_t *buf = vbuf;
size_t count = size >> 3;
int i;
ASSERT((size & 7) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_64(buf[i]);
}
void
byteswap_uint32_array(void *vbuf, size_t size)
{
uint32_t *buf = vbuf;
size_t count = size >> 2;
int i;
ASSERT((size & 3) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_32(buf[i]);
}
void
byteswap_uint16_array(void *vbuf, size_t size)
{
uint16_t *buf = vbuf;
size_t count = size >> 1;
int i;
ASSERT((size & 1) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_16(buf[i]);
}
void
byteswap_uint8_array(void *vbuf, size_t size)
{
(void) vbuf, (void) size;
}
void
dmu_init(void)
{
abd_init();
zfs_dbgmsg_init();
sa_cache_init();
dmu_objset_init();
dnode_init();
zfetch_init();
dmu_tx_init();
l2arc_init();
arc_init();
dbuf_init();
}
void
dmu_fini(void)
{
arc_fini(); /* arc depends on l2arc, so arc must go first */
l2arc_fini();
dmu_tx_fini();
zfetch_fini();
dbuf_fini();
dnode_fini();
dmu_objset_fini();
sa_cache_fini();
zfs_dbgmsg_fini();
abd_fini();
}
EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
"Enable NOP writes");
ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW,
"Percentage of dirtied blocks from frees in one TXG");
ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
"Enable forcing txg sync to find holes");
/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
"Limit one prefetch call to this size");
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index 2fdd7c1ece73..05ca91717c2f 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -1,3799 +1,3801 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2022 Axcient.
*/
#include <sys/arc.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zvol.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
#include <sys/zfs_file.h>
static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static uint_t zfs_recv_queue_ff = 20;
static uint_t zfs_recv_write_batch_size = 1024 * 1024;
static int zfs_recv_best_effort_corrective = 0;
static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
typedef enum {
ORNS_NO,
ORNS_YES,
ORNS_MAYBE
} or_need_sync_t;
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
void *buf);
struct receive_record_arg {
dmu_replay_record_t header;
void *payload; /* Pointer to a buffer containing the payload */
/*
* If the record is a WRITE or SPILL, pointer to the abd containing the
* payload.
*/
abd_t *abd;
int payload_size;
uint64_t bytes_read; /* bytes read from stream when record created */
boolean_t eos_marker; /* Marks the end of the stream */
bqueue_node_t node;
};
struct receive_writer_arg {
objset_t *os;
boolean_t byteswap;
bqueue_t q;
/*
* These three members are used to signal to the main thread when
* we're done.
*/
kmutex_t mutex;
kcondvar_t cv;
boolean_t done;
int err;
const char *tofs;
boolean_t heal;
boolean_t resumable;
boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
boolean_t full; /* this is a full send stream */
uint64_t last_object;
uint64_t last_offset;
uint64_t max_object; /* highest object ID referenced in stream */
uint64_t bytes_read; /* bytes read when current record created */
list_t write_batch;
/* Encryption parameters for the last received DRR_OBJECT_RANGE */
boolean_t or_crypt_params_present;
uint64_t or_firstobj;
uint64_t or_numslots;
uint8_t or_salt[ZIO_DATA_SALT_LEN];
uint8_t or_iv[ZIO_DATA_IV_LEN];
uint8_t or_mac[ZIO_DATA_MAC_LEN];
boolean_t or_byteorder;
zio_t *heal_pio;
/* Keep track of DRR_FREEOBJECTS right after DRR_OBJECT_RANGE */
or_need_sync_t or_need_sync;
};
typedef struct dmu_recv_begin_arg {
const char *drba_origin;
dmu_recv_cookie_t *drba_cookie;
cred_t *drba_cred;
proc_t *drba_proc;
dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
drr->drr_type = BSWAP_32(drr->drr_type);
drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
switch (drr->drr_type) {
case DRR_BEGIN:
DO64(drr_begin.drr_magic);
DO64(drr_begin.drr_versioninfo);
DO64(drr_begin.drr_creation_time);
DO32(drr_begin.drr_type);
DO32(drr_begin.drr_flags);
DO64(drr_begin.drr_toguid);
DO64(drr_begin.drr_fromguid);
break;
case DRR_OBJECT:
DO64(drr_object.drr_object);
DO32(drr_object.drr_type);
DO32(drr_object.drr_bonustype);
DO32(drr_object.drr_blksz);
DO32(drr_object.drr_bonuslen);
DO32(drr_object.drr_raw_bonuslen);
DO64(drr_object.drr_toguid);
DO64(drr_object.drr_maxblkid);
break;
case DRR_FREEOBJECTS:
DO64(drr_freeobjects.drr_firstobj);
DO64(drr_freeobjects.drr_numobjs);
DO64(drr_freeobjects.drr_toguid);
break;
case DRR_WRITE:
DO64(drr_write.drr_object);
DO32(drr_write.drr_type);
DO64(drr_write.drr_offset);
DO64(drr_write.drr_logical_size);
DO64(drr_write.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
DO64(drr_write.drr_key.ddk_prop);
DO64(drr_write.drr_compressed_size);
break;
case DRR_WRITE_EMBEDDED:
DO64(drr_write_embedded.drr_object);
DO64(drr_write_embedded.drr_offset);
DO64(drr_write_embedded.drr_length);
DO64(drr_write_embedded.drr_toguid);
DO32(drr_write_embedded.drr_lsize);
DO32(drr_write_embedded.drr_psize);
break;
case DRR_FREE:
DO64(drr_free.drr_object);
DO64(drr_free.drr_offset);
DO64(drr_free.drr_length);
DO64(drr_free.drr_toguid);
break;
case DRR_SPILL:
DO64(drr_spill.drr_object);
DO64(drr_spill.drr_length);
DO64(drr_spill.drr_toguid);
DO64(drr_spill.drr_compressed_size);
DO32(drr_spill.drr_type);
break;
case DRR_OBJECT_RANGE:
DO64(drr_object_range.drr_firstobj);
DO64(drr_object_range.drr_numslots);
DO64(drr_object_range.drr_toguid);
break;
case DRR_REDACT:
DO64(drr_redact.drr_object);
DO64(drr_redact.drr_offset);
DO64(drr_redact.drr_length);
DO64(drr_redact.drr_toguid);
break;
case DRR_END:
DO64(drr_end.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
break;
default:
break;
}
if (drr->drr_type != DRR_BEGIN) {
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
}
#undef DO64
#undef DO32
}
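/*
 * Worked example: for a DRR_FREE record arriving from an
 * opposite-endian sender, each 64-bit field is swapped in place,
 * e.g. drr_offset 0x0000000000001000 becomes 0x0010000000000000
 * after BSWAP_64.
 */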
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check that the new stream we're trying to receive is redacted with respect to
* a subset of the snapshots that the origin was redacted with respect to. For
* the reasons behind this, see the man page on redacted zfs sends and receives.
*/
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
/*
* Short circuit the comparison; if we are redacted with respect to
* more snapshots than the origin, we can't be redacted with respect
* to a subset.
*/
if (num_redact_snaps > origin_num_snaps) {
return (B_FALSE);
}
for (int i = 0; i < num_redact_snaps; i++) {
if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
redact_snaps[i])) {
return (B_FALSE);
}
}
return (B_TRUE);
}
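/*
 * Worked example: if the origin was redacted with respect to snapshots
 * {A, B, C}, a stream redacted with respect to {A, C} is a subset and
 * passes, while a stream redacted with respect to {A, D} fails because
 * D is not among the origin's redaction snapshots.
 */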
static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
uint64_t *origin_snaps;
uint64_t origin_num_snaps;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
int err = 0;
boolean_t ret = B_TRUE;
uint64_t *redact_snaps;
uint_t numredactsnaps;
/*
* If this is a full send stream, we're safe no matter what.
*/
if (drrb->drr_fromguid == 0)
return (ret);
VERIFY(dsl_dataset_get_uint64_array_feature(origin,
SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
0) {
/*
* If the send stream was sent from the redaction bookmark or
* the redacted version of the dataset, then we're safe. Verify
* that this is from a compatible redaction bookmark or
* redacted dataset.
*/
if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
/*
* If the stream is redacted, it must be redacted with respect
* to a subset of what the origin is redacted with respect to.
* See case number 2 in the zfs man page section on redacted zfs
* send.
*/
err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);
if (err != 0 || !compatible_redact_snaps(origin_snaps,
origin_num_snaps, redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
drrb->drr_toguid)) {
/*
* If the stream isn't redacted but the origin is, this must be
* one of the snapshots the origin is redacted with respect to.
* See case number 1 in the zfs man page section on redacted zfs
* send.
*/
err = EINVAL;
}
if (err != 0)
ret = B_FALSE;
return (ret);
}
/*
* If we previously received a stream with --large-block, we don't support
* receiving an incremental on top of it without --large-block. This avoids
* forcing a read-modify-write or trying to re-aggregate a string of WRITE
* records.
*/
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
!(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
return (0);
}
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
uint64_t fromguid, uint64_t featureflags)
{
uint64_t obj;
uint64_t children;
int error;
dsl_dataset_t *snap;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;
/* Temporary clone name must not exist. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
8, 1, &obj);
if (error != ENOENT)
return (error == 0 ? SET_ERROR(EBUSY) : error);
/* Resume state must not be set. */
if (dsl_dataset_has_resume_receive_state(ds))
return (SET_ERROR(EBUSY));
/* New snapshot name must not exist if we're not healing it. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj,
drba->drba_cookie->drc_tosnap, 8, 1, &obj);
if (drba->drba_cookie->drc_heal) {
if (error != 0)
return (error);
} else if (error != ENOENT) {
return (error == 0 ? SET_ERROR(EEXIST) : error);
}
/* Must not have children if receiving a ZVOL. */
error = zap_count(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
children > 0)
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
/*
* Check snapshot limit before receiving. We'll recheck again at the
* end, but might as well abort before receiving if we're already over
* the limit.
*
* Note that we do not check the file system limit with
* dsl_dir_fscount_check because the temporary %clones don't count
* against that limit.
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
NULL, drba->drba_cred, drba->drba_proc);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_heal) {
/* Encryption is incompatible with embedded data. */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Healing is not supported when in 'force' mode. */
if (drba->drba_cookie->drc_force)
return (SET_ERROR(EINVAL));
/* Must have keys loaded if doing encrypted non-raw recv. */
if (encrypted && !raw) {
if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
NULL, NULL) != 0)
return (SET_ERROR(EACCES));
}
error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
if (error != 0)
return (error);
/*
* When not doing best-effort corrective recv, healing can only
* be done if the send stream is for the same snapshot as the
* one we are trying to heal.
*/
if (zfs_recv_best_effort_corrective == 0 &&
drba->drba_cookie->drc_drrb->drr_toguid !=
dsl_dataset_phys(snap)->ds_guid) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(snap, FTAG);
} else if (fromguid != 0) {
/* Sanity check the incremental recv */
uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
/* Can't perform a raw receive on top of a non-raw receive */
if (!encrypted && raw)
return (SET_ERROR(EINVAL));
/* Encryption is incompatible with embedded data */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Find snapshot in this dir that matches fromguid. */
while (obj != 0) {
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
return (SET_ERROR(ENODEV));
if (snap->ds_dir != ds->ds_dir) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENODEV));
}
if (dsl_dataset_phys(snap)->ds_guid == fromguid)
break;
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
}
if (obj == 0)
return (SET_ERROR(ENODEV));
if (drba->drba_cookie->drc_force) {
drba->drba_cookie->drc_fromsnapobj = obj;
} else {
/*
* If we are not forcing, there must be no
* changes since fromsnap. Raw sends have an
* additional constraint that requires that
* no "noop" snapshots exist between fromsnap
* and tosnap for the IVset checking code to
* work properly.
*/
if (dsl_dataset_modified_since_snap(ds, snap) ||
(raw &&
dsl_dataset_phys(ds)->ds_prev_snap_obj !=
snap->ds_object)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ETXTBSY));
}
drba->drba_cookie->drc_fromsnapobj =
ds->ds_prev->ds_object;
}
if (dsl_dataset_feature_is_active(snap,
SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
snap)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_check_large_blocks(snap, featureflags);
if (error != 0) {
dsl_dataset_rele(snap, FTAG);
return (error);
}
dsl_dataset_rele(snap, FTAG);
} else {
/* If full and not healing then must be forced. */
if (!drba->drba_cookie->drc_force)
return (SET_ERROR(EEXIST));
/*
* We don't support using zfs recv -F to blow away
* encrypted filesystems. This would require the
* dsl dir to point to the old encryption key and
* the new one at the same time during the receive.
*/
if ((!encrypted && raw) || encrypted)
return (SET_ERROR(EINVAL));
/*
* Perform the same encryption checks we would if
* we were creating a new dataset from scratch.
*/
if (!raw) {
boolean_t will_encrypt;
error = dmu_objset_create_crypt_check(
ds->ds_dir->dd_parent, drba->drba_dcp,
&will_encrypt);
if (error != 0)
return (error);
if (will_encrypt && embed)
return (SET_ERROR(EINVAL));
}
}
return (0);
}
/*
* Check that any feature flags used in the data stream we're receiving are
* supported by the pool we are receiving into.
*
* Note that some of the features we explicitly check here have additional
* (implicit) features they depend on, but those dependencies are enforced
* through the zfeature_register() calls declaring the features that we
* explicitly check.
*/
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
/*
* Check if there are any unsupported feature flags.
*/
if (!DMU_STREAM_SUPPORTED(featureflags)) {
return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
}
/* Verify pool version supports SA if SA_SPILL feature set */
if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
spa_version(spa) < SPA_VERSION_SA)
return (SET_ERROR(ENOTSUP));
/*
* LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
* and large_dnodes in the stream can only be used if those pool
* features are enabled because we don't attempt to decompress /
* un-embed / un-mooch / split up the blocks / dnodes during the
* receive process.
*/
if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (SET_ERROR(ENOTSUP));
/*
* Receiving redacted streams requires that redacted datasets are
* enabled.
*/
if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
return (SET_ERROR(ENOTSUP));
return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
uint64_t fromguid = drrb->drr_fromguid;
int flags = drrb->drr_flags;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
const char *tofs = drba->drba_cookie->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES ||
((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
return (SET_ERROR(EINVAL));
error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
if (error != 0)
return (error);
/* Resumable receives require extensible datasets */
if (drba->drba_cookie->drc_resumable &&
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
return (SET_ERROR(ENOTSUP));
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require the encryption feature */
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
return (SET_ERROR(ENOTSUP));
/* embedded data is incompatible with encryption and raw recv */
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (SET_ERROR(EINVAL));
/* raw receives require spill block allocation flag */
if (!(flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
/*
* We support unencrypted datasets below encrypted ones now,
* so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
* with a dataset we may encrypt.
*/
if (drba->drba_dcp == NULL ||
drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* target fs already exists; recv into temp clone */
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_begin_check_existing_impl(drba, ds, fromguid,
featureflags);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else if (error == ENOENT) {
/* target fs does not exist; must be a full backup or clone */
char buf[ZFS_MAX_DATASET_NAME_LEN];
objset_t *os;
/* healing recv must be done "into" an existing snapshot */
if (drba->drba_cookie->drc_heal == B_TRUE)
return (SET_ERROR(ENOTSUP));
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
*/
if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
drba->drba_origin))
return (SET_ERROR(ENOENT));
/*
* If we're receiving a full send as a clone, and it doesn't
* contain all the necessary free records and freeobject
* records, reject it.
*/
if (fromguid == 0 && drba->drba_origin != NULL &&
!(flags & DRR_FLAG_FREERECORDS))
return (SET_ERROR(EINVAL));
/* Open the parent of tofs */
ASSERT3U(strlen(tofs), <, sizeof (buf));
(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
error = dsl_dataset_hold(dp, buf, FTAG, &ds);
if (error != 0)
return (error);
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
drba->drba_origin == NULL) {
boolean_t will_encrypt;
/*
* Check that we aren't breaking any encryption rules
* and that we have all the parameters we need to
* create an encrypted dataset if necessary. If we are
* making an encrypted dataset the stream can't have
* embedded data.
*/
error = dmu_objset_create_crypt_check(ds->ds_dir,
drba->drba_dcp, &will_encrypt);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (will_encrypt &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
}
/*
* Check filesystem and snapshot limits before receiving. We'll
* recheck snapshot limits again at the end (we create the
* filesystems and increment those counts during begin_sync).
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_FILESYSTEM_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/* can't recv below anything but filesystems (e.g. no ZVOLs) */
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
if (drba->drba_origin != NULL) {
dsl_dataset_t *origin;
error = dsl_dataset_hold_flags(dp, drba->drba_origin,
dsflags, FTAG, &origin);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (!origin->ds_is_snapshot) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
fromguid != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENODEV));
}
if (origin->ds_dir->dd_crypto_obj != 0 &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* If the origin is redacted we need to verify that this
* send stream can safely be received on top of the
* origin.
*/
if (dsl_dataset_feature_is_active(origin,
SPA_FEATURE_REDACTED_DATASETS)) {
if (!redact_check(drba, origin)) {
dsl_dataset_rele_flags(origin, dsflags,
FTAG);
dsl_dataset_rele_flags(ds, dsflags,
FTAG);
return (SET_ERROR(EINVAL));
}
}
error = recv_check_large_blocks(ds, featureflags);
if (error != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(origin, dsflags, FTAG);
}
dsl_dataset_rele(ds, FTAG);
error = 0;
}
return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
const char *tofs = drc->drc_tofs;
uint64_t featureflags = drc->drc_featureflags;
dsl_dataset_t *ds, *newds;
objset_t *os;
uint64_t dsobj;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t crflags = 0;
dsl_crypto_params_t dummy_dcp = { 0 };
dsl_crypto_params_t *dcp = drba->drba_dcp;
if (drrb->drr_flags & DRR_FLAG_CI_DATA)
crflags |= DS_FLAG_CI_DATASET;
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
dsflags |= DS_HOLD_FLAG_DECRYPT;
/*
* Raw, non-incremental recvs always use a dummy dcp with
* the raw cmd set. Raw incremental recvs do not use a dcp
* since the encryption parameters are already set in stone.
*/
if (dcp == NULL && drrb->drr_fromguid == 0 &&
drba->drba_origin == NULL) {
ASSERT3P(dcp, ==, NULL);
dcp = &dummy_dcp;
if (featureflags & DMU_BACKUP_FEATURE_RAW)
dcp->cp_cmd = DCP_CMD_RAW_RECV;
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* Create temporary clone unless we're doing corrective recv */
dsl_dataset_t *snap = NULL;
if (drba->drba_cookie->drc_fromsnapobj != 0) {
VERIFY0(dsl_dataset_hold_obj(dp,
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
ASSERT3P(dcp, ==, NULL);
}
if (drc->drc_heal) {
/* When healing we want to use the provided snapshot */
VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
&dsobj));
} else {
dsobj = dsl_dataset_create_sync(ds->ds_dir,
recv_clone_name, snap, crflags, drba->drba_cred,
dcp, tx);
}
if (drba->drba_cookie->drc_fromsnapobj != 0)
dsl_dataset_rele(snap, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else {
dsl_dir_t *dd;
const char *tail;
dsl_dataset_t *origin = NULL;
VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
if (drba->drba_origin != NULL) {
VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
FTAG, &origin));
ASSERT3P(dcp, ==, NULL);
}
/* Create new dataset. */
dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
origin, crflags, drba->drba_cred, dcp, tx);
if (origin != NULL)
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(dd, FTAG);
drc->drc_newfs = B_TRUE;
}
VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
&newds));
if (dsl_dataset_feature_is_active(newds,
SPA_FEATURE_REDACTED_DATASETS)) {
/*
* If the origin dataset is redacted, the child will be redacted
* when we create it. We clear the new dataset's
* redaction info; if it should be redacted, we'll fill
* in its information later.
*/
dsl_dataset_deactivate_feature(newds,
SPA_FEATURE_REDACTED_DATASETS, tx);
}
VERIFY0(dmu_objset_from_ds(newds, &os));
if (drc->drc_resumable) {
dsl_dataset_zapify(newds, tx);
if (drrb->drr_fromguid != 0) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
8, 1, &drrb->drr_fromguid, tx));
}
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
8, 1, &drrb->drr_toguid, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
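/*
 * Seed the resume cursor at <object 1, offset 0> with zero bytes
 * accounted, so a resume token generated before the first WRITE
 * record points at the very start of the stream.
 */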
uint64_t one = 1;
uint64_t zero = 0;
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
8, 1, &one, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
8, 1, &zero, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
8, 1, &zero, tx));
if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
8, 1, &one, tx));
}
uint64_t *redact_snaps;
uint_t numredactsnaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
&numredactsnaps) == 0) {
VERIFY0(zap_add(mos, dsobj,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
sizeof (*redact_snaps), numredactsnaps,
redact_snaps, tx));
}
}
/*
* Usually the os->os_encrypted value is tied to the presence of a
* DSL Crypto Key object in the dd. However, that will not be received
* until dmu_recv_stream(), so we set the value manually for now.
*/
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
os->os_encrypted = B_TRUE;
drba->drba_cookie->drc_raw = B_TRUE;
}
if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t *redact_snaps;
uint_t numredactsnaps;
VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
dsl_dataset_activate_redaction(newds, redact_snaps,
numredactsnaps, tx);
}
dmu_buf_will_dirty(newds->ds_dbuf, tx);
dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
/*
* If we actually created a non-clone, we need to create the objset
* in our new dataset. If this is a raw send we postpone this until
* dmu_recv_stream() so that we can allocate the metadnode with the
* properties from the DRR_BEGIN payload.
*/
rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
(featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
!drc->drc_heal) {
(void) dmu_objset_create_impl(dp->dp_spa,
newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
}
rrw_exit(&newds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = newds;
drba->drba_cookie->drc_os = os;
spa_history_log_internal_ds(newds, "receive", tx, " ");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dmu_recv_cookie_t *drc = drba->drba_cookie;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drc->drc_drrb;
int error;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
dsl_dataset_t *ds;
const char *tofs = drc->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES)
return (SET_ERROR(EINVAL));
/*
* This is mostly a sanity check since we should have already done these
* checks during a previous attempt to receive the data.
*/
error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
dp->dp_spa);
if (error != 0)
return (error);
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s",
tofs, recv_clone_name);
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require spill block allocation flag */
if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
boolean_t recvexist = B_TRUE;
if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
/* %recv does not exist; continue in tofs */
recvexist = B_FALSE;
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error != 0)
return (error);
}
/*
* Resuming a full (newfs) recv on an existing dataset requires the
* force flag.
*/
if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(ZFS_ERR_RESUME_EXISTS));
}
/* check that ds is marked inconsistent */
if (!DS_IS_INCONSISTENT(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/* check that there is resuming data, and that the toguid matches */
if (!dsl_dataset_is_zapified(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
uint64_t val;
error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
if (error != 0 || drrb->drr_toguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Check if the receive is still running. If so, it will be owned.
* Note that nothing else can own the dataset (e.g. after the receive
* fails) because it will be marked inconsistent.
*/
if (dsl_dataset_has_owner(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EBUSY));
}
/* There should not be any snapshots of this fs yet. */
if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Note: resume point will be checked when we process the first WRITE
* record.
*/
/* check that the origin matches */
val = 0;
(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
if (drrb->drr_fromguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
drc->drc_fromsnapobj = ds->ds_prev->ds_object;
/*
* If we're resuming, and the send is redacted, then the original send
* must have been redacted, and must have been redacted with respect to
* the same snapshots.
*/
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t num_ds_redact_snaps;
uint64_t *ds_redact_snaps;
uint_t num_stream_redact_snaps;
uint64_t *stream_redact_snaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
&num_stream_redact_snaps) != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (!dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
&ds_redact_snaps)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
for (int i = 0; i < num_ds_redact_snaps; i++) {
if (!redact_snaps_contains(ds_redact_snaps,
num_ds_redact_snaps, stream_redact_snaps[i])) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
}
}
error = recv_check_large_blocks(ds, drc->drc_featureflags);
if (error != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
const char *tofs = drba->drba_cookie->drc_tofs;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
recv_clone_name);
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
drba->drba_cookie->drc_raw = B_TRUE;
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
!= 0) {
/* %recv does not exist; continue in tofs */
VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
&ds));
drba->drba_cookie->drc_newfs = B_TRUE;
}
ASSERT(DS_IS_INCONSISTENT(ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
drba->drba_cookie->drc_raw);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = ds;
VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
drba->drba_cookie->drc_should_save = B_TRUE;
spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}
/*
* NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
* succeeds; otherwise we will leak the holds on the datasets.
*/
int
dmu_recv_begin(const char *tofs, const char *tosnap,
dmu_replay_record_t *drr_begin, boolean_t force, boolean_t heal,
boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args,
const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
offset_t *voffp)
{
dmu_recv_begin_arg_t drba = { 0 };
int err = 0;
memset(drc, 0, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
drc->drc_tofs = tofs;
drc->drc_force = force;
drc->drc_heal = heal;
drc->drc_resumable = resumable;
drc->drc_cred = CRED();
drc->drc_proc = curproc;
drc->drc_clone = (origin != NULL);
if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
drc->drc_byteswap = B_TRUE;
(void) fletcher_4_incremental_byteswap(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
byteswap_record(drr_begin);
} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
(void) fletcher_4_incremental_native(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
} else {
return (SET_ERROR(EINVAL));
}
drc->drc_fp = fp;
drc->drc_voff = *voffp;
drc->drc_featureflags =
DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
/*
* Since OpenZFS 2.0.0, we have enforced a 64MB limit in userspace
* configurable via ZFS_SENDRECV_MAX_NVLIST. We enforce 256MB as a hard
* upper limit. Systems with less than 1GB of RAM will see a lower
* limit from `arc_all_memory() / 4`.
*/
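/* For example, a host with 512MB of RAM caps the payload at ~128MB. */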
if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4)))
return (E2BIG);
if (payloadlen != 0) {
void *payload = vmem_alloc(payloadlen, KM_SLEEP);
/*
* For compatibility with recursive send streams, we don't do
* this here if the stream could be part of a package. Instead,
* we'll do it in dmu_recv_stream. If we pull the next header
* too early, and it's the END record, we break the `recv_skip`
* logic.
*/
err = receive_read_payload_and_next_header(drc, payloadlen,
payload);
if (err != 0) {
vmem_free(payload, payloadlen);
return (err);
}
err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
KM_SLEEP);
vmem_free(payload, payloadlen);
if (err != 0) {
kmem_free(drc->drc_next_rrd,
sizeof (*drc->drc_next_rrd));
return (err);
}
}
if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
drc->drc_spill = B_TRUE;
drba.drba_origin = origin;
drba.drba_cookie = drc;
drba.drba_cred = CRED();
drba.drba_proc = curproc;
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = dsl_sync_task(tofs,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
* with "zfs recv -o". For these receives we create a dcp and
* pass it to the sync task. Creating the dcp will implicitly
* remove the encryption params from the localprops nvlist,
* which avoids errors when trying to set these normally
* read-only properties. Any other kind of receive that
* attempts to set these properties will fail as a result.
*/
if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW) == 0 &&
origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
localprops, hidden_args, &drba.drba_dcp);
}
if (err == 0) {
err = dsl_sync_task(tofs,
dmu_recv_begin_check, dmu_recv_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
dsl_crypto_params_free(drba.drba_dcp, !!err);
}
}
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
nvlist_free(drc->drc_begin_nvl);
}
return (err);
}
/*
* Holds the data needed by the corrective recv callback.
*/
typedef struct cr_cb_data {
uint64_t size;
zbookmark_phys_t zb;
spa_t *spa;
} cr_cb_data_t;
static void
corrective_read_done(zio_t *zio)
{
cr_cb_data_t *data = zio->io_private;
/* Corruption corrected; update error log if needed */
if (zio->io_error == 0)
spa_remove_error(data->spa, &data->zb, &zio->io_bp->blk_birth);
kmem_free(data, sizeof (cr_cb_data_t));
abd_free(zio->io_abd);
}
/*
* zio_rewrite the data pointed to by bp with the data from the rrd's abd.
*/
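/*
 * Sketch of the healing pipeline below: decompress the stream data if
 * needed, recompress and re-encrypt it to match the on-disk bp, verify
 * that the recomputed checksum matches the bp's, and only then rewrite
 * the block in place and re-read it to confirm the repair took.
 */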
static int
do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
struct receive_record_arg *rrd, blkptr_t *bp)
{
int err;
zio_t *io;
zbookmark_phys_t zb;
dnode_t *dn;
abd_t *abd = rrd->abd;
zio_cksum_t bp_cksum = bp->blk_cksum;
zio_flag_t flags = ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL;
if (rwa->raw)
flags |= ZIO_FLAG_RAW;
err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
if (err != 0)
return (err);
SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
dbuf_whichblock(dn, 0, drrw->drr_offset));
dnode_rele(dn, FTAG);
if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
/* Decompress the stream data */
abd_t *dabd = abd_alloc_linear(
drrw->drr_logical_size, B_FALSE);
err = zio_decompress_data(drrw->drr_compressiontype,
abd, abd_to_buf(dabd), abd_get_size(abd),
abd_get_size(dabd), NULL);
if (err != 0) {
abd_free(dabd);
return (err);
}
/* Swap in the newly decompressed data into the abd */
abd_free(abd);
abd = dabd;
}
if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/* Recompress the data */
abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
B_FALSE);
void *buf = abd_to_buf(cabd);
uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
abd, &buf, abd_get_size(abd),
rwa->os->os_complevel);
abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
/* Swap in newly compressed data into the abd */
abd_free(abd);
abd = cabd;
flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* The stream is not encrypted but the data on-disk is.
* We need to re-encrypt the buf using the same
* encryption type, salt, iv, and mac that were used to encrypt
* the block previously.
*/
if (!rwa->raw && BP_USES_CRYPT(bp)) {
dsl_dataset_t *ds;
dsl_crypto_key_t *dck = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
dsl_pool_t *dp = dmu_objset_pool(rwa->os);
abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
dsl_pool_config_enter(dp, FTAG);
err = dsl_dataset_hold_flags(dp, rwa->tofs,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
abd_free(eabd);
return (SET_ERROR(EACCES));
}
/* Look up the key from the spa's keystore */
err = spa_keystore_lookup_key(rwa->os->os_spa,
zb.zb_objset, FTAG, &dck);
if (err != 0) {
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
FTAG);
dsl_pool_config_exit(dp, FTAG);
abd_free(eabd);
return (SET_ERROR(EACCES));
}
err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
mac, abd_get_size(abd), abd, eabd, &no_crypt);
spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_pool_config_exit(dp, FTAG);
ASSERT0(no_crypt);
if (err != 0) {
abd_free(eabd);
return (err);
}
/* Swap in the newly encrypted data into the abd */
abd_free(abd);
abd = eabd;
/*
* We want to prevent zio_rewrite() from trying to
* encrypt the data again
*/
flags |= ZIO_FLAG_RAW_ENCRYPT;
}
rrd->abd = abd;
io = zio_rewrite(NULL, rwa->os->os_spa, bp->blk_birth, bp, abd,
BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb);
ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
abd_get_size(abd) == BP_GET_PSIZE(bp));
/* compute new bp checksum value and make sure it matches the old one */
zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
zio_destroy(io);
if (zfs_recv_best_effort_corrective != 0)
return (0);
return (SET_ERROR(ECKSUM));
}
/* Correct the corruption in place */
err = zio_wait(io);
if (err == 0) {
cr_cb_data_t *cb_data =
kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
cb_data->spa = rwa->os->os_spa;
cb_data->size = drrw->drr_logical_size;
cb_data->zb = zb;
/* Test if healing worked by re-reading the bp */
err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
drrw->drr_logical_size, corrective_read_done,
cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
}
if (err != 0 && zfs_recv_best_effort_corrective != 0)
err = 0;
return (err);
}
static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
int done = 0;
/*
* The code doesn't rely on this (lengths being multiples of 8). See
* comment in dump_bytes.
*/
ASSERT(len % 8 == 0 ||
(drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
while (done < len) {
ssize_t resid = len - done;
zfs_file_t *fp = drc->drc_fp;
int err = zfs_file_read(fp, (char *)buf + done,
len - done, &resid);
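/*
 * zfs_file_read() returns the residual (untransferred) byte count
 * in resid; no error with no progress means EOF mid-record.
 */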
if (err == 0 && resid == len - done) {
/*
* Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
* that the receive was interrupted and can
* potentially be resumed.
*/
err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
}
drc->drc_voff += len - done - resid;
done = len - resid;
if (err != 0)
return (err);
}
drc->drc_bytes_read += len;
ASSERT3U(done, ==, len);
return (0);
}
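/*
 * Worked example, assuming the classic 512-byte dnode layout: each
 * block pointer is 128 bytes (1 << SPA_BLKPTRSHIFT) and
 * DN_OLD_MAX_BONUSLEN is 320, so a 64-byte bonus leaves room for
 * 1 + ((320 - 64) >> 7) == 3 blkptrs, while a full 320-byte bonus
 * leaves only the mandatory one. DMU_OT_SA dnodes always report a
 * single blkptr so the SA bonus area can use the remaining space.
 */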
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
if (bonus_type == DMU_OT_SA) {
return (1);
} else {
return (1 +
((DN_OLD_MAX_BONUSLEN -
MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
}
}
static void
save_resume_state(struct receive_writer_arg *rwa,
uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (!rwa->resumable)
return;
/*
* We use ds_resume_bytes[] != 0 to indicate that we need to
* update this on disk, so it must not be 0.
*/
ASSERT(rwa->bytes_read != 0);
/*
* We only resume from write records, which have a valid
* (non-meta-dnode) object number.
*/
ASSERT(object != 0);
/*
* For resuming to work correctly, we must receive records in order,
* sorted by object,offset. This is checked by the callers, but
* assert it here for good measure.
*/
ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
ASSERT3U(rwa->bytes_read, >=,
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
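/*
 * Decide whether the object in the stream is the same logical file as
 * the one on disk by comparing the ZPL generation numbers stored in
 * the existing and incoming bonus buffers.
 */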
static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
const void *new_bonus, boolean_t *samegenp)
{
zfs_file_info_t zoi;
int err;
dmu_buf_t *old_bonus_dbuf;
err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
if (err != 0)
return (err);
err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
&zoi);
dmu_buf_rele(old_bonus_dbuf, FTAG);
if (err != 0)
return (err);
uint64_t old_gen = zoi.zfi_generation;
err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
if (err != 0)
return (err);
uint64_t new_gen = zoi.zfi_generation;
*samegenp = (old_gen == new_gen);
return (0);
}
static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
const struct drr_object *drro, const dmu_object_info_t *doi,
const void *bonus_data,
uint64_t *object_to_hold, uint32_t *new_blksz)
{
uint32_t indblksz = drro->drr_indblkshift ?
1ULL << drro->drr_indblkshift : 0;
int nblkptr = deduce_nblkptr(drro->drr_bonustype,
drro->drr_bonuslen);
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
boolean_t do_free_range = B_FALSE;
int err;
*object_to_hold = drro->drr_object;
/* nblkptr should be bounded by the bonus size and type */
if (rwa->raw && nblkptr != drro->drr_nblkptr)
return (SET_ERROR(EINVAL));
/*
* After the previous send stream, the sending system may
* have freed this object, and then happened to re-allocate
* this object number in a later txg. In this case, we are
* receiving a different logical file, and the block size may
* appear to be different. i.e. we may have a different
* block size for this object than what the send stream says.
* In this case we need to remove the object's contents,
* so that its structure can be changed and then its contents
* entirely replaced by subsequent WRITE records.
*
* If this is a -L (--large-block) incremental stream, and
* the previous stream was not -L, the block size may appear
* to increase. i.e. we may have a smaller block size for
* this object than what the send stream says. In this case
* we need to keep the object's contents and block size
* intact, so that we don't lose parts of the object's
* contents that are not changed by this incremental send
* stream.
*
* We can distinguish between the two above cases by using
* the ZPL's generation number (see
* receive_object_is_same_generation()). However, we only
* want to rely on the generation number when absolutely
* necessary, because with raw receives, the generation is
* encrypted. We also want to minimize dependence on the
* ZPL, so that other types of datasets can also be received
* (e.g. ZVOLs, although note that ZVOLs currently do not
* reallocate their objects or change their structure).
* Therefore, we check a number of different cases where we
* know it is safe to discard the object's contents, before
* using the ZPL's generation number to make the above
* distinction.
*/
if (drro->drr_blksz != doi->doi_data_block_size) {
if (rwa->raw) {
/*
* RAW streams always have large blocks, so
* we are sure that the data is not needed
* due to changing --large-block to be on,
* which is fortunate since the bonus buffer
* (which contains the ZPL generation) is
* encrypted, and the key might not be
* loaded.
*/
do_free_range = B_TRUE;
} else if (rwa->full) {
/*
* This is a full send stream, so it always
* replaces what we have. Even if the
* generation numbers happen to match, this
* can not actually be the same logical file.
* This is relevant when receiving a full
* send as a clone.
*/
do_free_range = B_TRUE;
} else if (drro->drr_type !=
DMU_OT_PLAIN_FILE_CONTENTS ||
doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
/*
* PLAIN_FILE_CONTENTS are the only type of
* objects that have ever been stored with
* large blocks, so we don't need the special
* logic below. ZAP blocks can shrink (when
* there's only one block), so we don't want
* to hit the error below about block size
* only increasing.
*/
do_free_range = B_TRUE;
} else if (doi->doi_max_offset <=
doi->doi_data_block_size) {
/*
* There is only one block. We can free it,
* because its contents will be replaced by a
* WRITE record. This can not be the no-L ->
* -L case, because the no-L case would have
* resulted in multiple blocks. If we
* supported -L -> no-L, it would not be safe
* to free the file's contents. Fortunately,
* that is not allowed (see
* recv_check_large_blocks()).
*/
do_free_range = B_TRUE;
} else {
boolean_t is_same_gen;
err = receive_object_is_same_generation(rwa->os,
drro->drr_object, doi->doi_bonus_type,
drro->drr_bonustype, bonus_data, &is_same_gen);
if (err != 0)
return (SET_ERROR(EINVAL));
if (is_same_gen) {
/*
* This is the same logical file, and
* the block size must be increasing.
* It could only decrease if
* --large-block was changed to be
* off, which is checked in
* recv_check_large_blocks().
*/
if (drro->drr_blksz <=
doi->doi_data_block_size)
return (SET_ERROR(EINVAL));
/*
* We keep the existing blocksize and
* contents.
*/
*new_blksz =
doi->doi_data_block_size;
} else {
do_free_range = B_TRUE;
}
}
}
/* nblkptr can only decrease if the object was reallocated */
if (nblkptr < doi->doi_nblkptr)
do_free_range = B_TRUE;
/* number of slots can only change on reallocation */
if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
do_free_range = B_TRUE;
/*
* For raw sends we also check a few other fields to
* ensure we are preserving the objset structure exactly
* as it was on the send side:
* - A changed indirect block size
* - A smaller nlevels
*/
if (rwa->raw) {
if (indblksz != doi->doi_metadata_block_size)
do_free_range = B_TRUE;
if (drro->drr_nlevels < doi->doi_indirection)
do_free_range = B_TRUE;
}
if (do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
0, DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
/*
- * The dmu does not currently support decreasing nlevels
- * or changing the number of dnode slots on an object. For
- * non-raw sends, this does not matter and the new object
- * can just use the previous one's nlevels. For raw sends,
- * however, the structure of the received dnode (including
- * nlevels and dnode slots) must match that of the send
- * side. Therefore, instead of using dmu_object_reclaim(),
- * we must free the object completely and call
- * dmu_object_claim_dnsize() instead.
+ * The dmu does not currently support decreasing nlevels or changing
+ * the indirect block size once one exists, nor changing the
+ * number of dnode slots on an object. For non-raw sends this
+ * does not matter and the new object can just use the previous one's
+ * parameters. For raw sends, however, the structure of the received
+ * dnode (including indirects and dnode slots) must match that of the
+ * send side. Therefore, instead of using dmu_object_reclaim(), we
+ * must free the object completely and call dmu_object_claim_dnsize()
+ * instead.
*/
- if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
+ if ((rwa->raw && ((doi->doi_indirection > 1 &&
+ indblksz != doi->doi_metadata_block_size) ||
+ drro->drr_nlevels < doi->doi_indirection)) ||
dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
err = dmu_free_long_object(rwa->os, drro->drr_object);
if (err != 0)
return (SET_ERROR(EINVAL));
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
*object_to_hold = DMU_NEW_OBJECT;
}
/*
* For raw receives, free everything beyond the new incoming
* maxblkid. Normally this would be done with a DRR_FREE
* record that would come after this DRR_OBJECT record is
* processed. However, for raw receives we manually set the
* maxblkid from the drr_maxblkid and so we must first free
* everything above that blkid to ensure the DMU is always
* consistent with itself. We will never free the first block
* of the object here because a maxblkid of 0 could indicate
* an object with a single block or one with no blocks. This
* free may be skipped when dmu_free_long_range() was called
* above since it covers the entire object's contents.
*/
if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
(drro->drr_maxblkid + 1) * doi->doi_data_block_size,
DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
return (0);
}
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
void *data)
{
dmu_object_info_t doi;
dmu_tx_t *tx;
int err;
uint32_t new_blksz = drro->drr_blksz;
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
if (drro->drr_type == DMU_OT_NONE ||
!DMU_OT_IS_VALID(drro->drr_type) ||
!DMU_OT_IS_VALID(drro->drr_bonustype) ||
drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
drro->drr_blksz < SPA_MINBLOCKSIZE ||
drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
drro->drr_bonuslen >
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
dn_slots >
(spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
return (SET_ERROR(EINVAL));
}
if (rwa->raw) {
/*
* We should have received a DRR_OBJECT_RANGE record
* containing this block and stored it in rwa.
*/
if (drro->drr_object < rwa->or_firstobj ||
drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
drro->drr_raw_bonuslen < drro->drr_bonuslen ||
drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
drro->drr_nlevels > DN_MAX_LEVELS ||
drro->drr_nblkptr > DN_MAX_NBLKPTR ||
DN_SLOTS_TO_BONUSLEN(dn_slots) <
drro->drr_raw_bonuslen)
return (SET_ERROR(EINVAL));
} else {
/*
* The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
* record indicates this by setting DRR_FLAG_SPILL_BLOCK.
*/
if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
(!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
return (SET_ERROR(EINVAL));
}
if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
return (SET_ERROR(EINVAL));
}
}
err = dmu_object_info(rwa->os, drro->drr_object, &doi);
if (err != 0 && err != ENOENT && err != EEXIST)
return (SET_ERROR(EINVAL));
if (drro->drr_object > rwa->max_object)
rwa->max_object = drro->drr_object;
/*
* If we are losing blkptrs or changing the block size this must
* be a new file instance. We must clear out the previous file
* contents before we can change this type of metadata in the dnode.
* Raw receives will also check that the indirect structure of the
* dnode hasn't changed.
*/
uint64_t object_to_hold;
if (err == 0) {
err = receive_handle_existing_object(rwa, drro, &doi, data,
&object_to_hold, &new_blksz);
if (err != 0)
return (err);
} else if (err == EEXIST) {
/*
* The object requested is currently an interior slot of a
* multi-slot dnode. This will be resolved when the next txg
* is synced out, since the send stream will have told us
* to free this slot when we freed the associated dnode
* earlier in the stream.
*/
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
return (SET_ERROR(EINVAL));
/* object was freed and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
} else {
/*
* If the only record in this range so far was DRR_FREEOBJECTS
* with at least one actually freed object, it's possible that
* the block will now be converted to a hole. We need to wait
* for the txg to sync to prevent races.
*/
if (rwa->or_need_sync == ORNS_YES)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
/* object is free and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
}
/* Only relevant for the first object in the range */
rwa->or_need_sync = ORNS_NO;
/*
* If this is a multi-slot dnode there is a chance that this
* object will expand into a slot that is already used by
* another object from the previous snapshot. We must free
* these objects before we attempt to allocate the new dnode.
*/
if (dn_slots > 1) {
boolean_t need_sync = B_FALSE;
for (uint64_t slot = drro->drr_object + 1;
slot < drro->drr_object + dn_slots;
slot++) {
dmu_object_info_t slot_doi;
err = dmu_object_info(rwa->os, slot, &slot_doi);
if (err == ENOENT || err == EEXIST)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, slot);
if (err != 0)
return (err);
need_sync = B_TRUE;
}
if (need_sync)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
}
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_bonus(tx, object_to_hold);
dmu_tx_hold_write(tx, object_to_hold, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
if (object_to_hold == DMU_NEW_OBJECT) {
/* Currently free, wants to be allocated */
err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, tx);
} else if (drro->drr_type != doi.doi_type ||
new_blksz != doi.doi_data_block_size ||
drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size) {
/* Currently allocated, but with different properties */
err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, rwa->spill ?
DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
/*
* Currently allocated, the existing version of this object
* may reference a spill block that is no longer allocated
* at the source and needs to be freed.
*/
err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
}
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
if (rwa->or_crypt_params_present) {
/*
* Set the crypt params for the buffer associated with this
* range of dnodes. This causes the blkptr_t to have the
* same crypt params (byteorder, salt, iv, mac) as on the
* sending side.
*
* Since we are committing this tx now, it is possible for
* the dnode block to end up on-disk with the incorrect MAC,
* if subsequent objects in this block are received in a
* different txg. However, since the dataset is marked as
* inconsistent, no code paths will do a non-raw read (or
* decrypt the block / verify the MAC). The receive code and
* scrub code can safely do raw reads and verify the
* checksum. They don't need to verify the MAC.
*/
dmu_buf_t *db = NULL;
uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
dmu_buf_set_crypt_params(db, rwa->or_byteorder,
rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
dmu_buf_rele(db, FTAG);
rwa->or_crypt_params_present = B_FALSE;
}
dmu_object_set_checksum(rwa->os, drro->drr_object,
drro->drr_checksumtype, tx);
dmu_object_set_compress(rwa->os, drro->drr_object,
drro->drr_compress, tx);
/* handle more restrictive dnode structuring for raw recvs */
if (rwa->raw) {
/*
* Set the indirect block size, block shift, nlevels.
* This will not fail because we ensured all of the
* blocks were freed earlier if this is a new object.
* For non-new objects block size and indirect block
* shift cannot change and nlevels can only increase.
*/
ASSERT3U(new_blksz, ==, drro->drr_blksz);
VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
drro->drr_blksz, drro->drr_indblkshift, tx));
VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
drro->drr_nlevels, tx));
/*
* Set the maxblkid. This will always succeed because
* we freed all blocks beyond the new maxblkid above.
*/
VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
drro->drr_maxblkid, tx));
}
if (data != NULL) {
dmu_buf_t *db;
dnode_t *dn;
uint32_t flags = DMU_READ_NO_PREFETCH;
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;
VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
* DRR_OBJECT_RANGE record.
*/
if (rwa->byteswap && !rwa->raw) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drro->drr_bonustype);
dmu_ot_byteswap[byteswap].ob_func(db->db_data,
DRR_OBJECT_PAYLOAD_SIZE(drro));
}
dmu_buf_rele(db, FTAG);
dnode_rele(dn, FTAG);
}
dmu_tx_commit(tx);
return (0);
}
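/*
 * Free every allocated object in [drr_firstobj, drr_firstobj +
 * drr_numobjs). Object 0 is never freed, dmu_object_next() walks
 * over holes, and ESRCH from it just means we ran off the end of
 * the objset.
 */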
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
struct drr_freeobjects *drrfo)
{
uint64_t obj;
int next_err = 0;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
obj < DN_MAX_OBJECT && next_err == 0;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
dmu_object_info_t doi;
int err;
err = dmu_object_info(rwa->os, obj, &doi);
if (err == ENOENT)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, obj);
if (err != 0)
return (err);
if (rwa->or_need_sync == ORNS_MAYBE)
rwa->or_need_sync = ORNS_YES;
}
if (next_err != ESRCH)
return (next_err);
return (0);
}
/*
* Note: if this fails, the caller will clean up any records left on the
* rwa->write_batch list.
*/
static int
flush_write_batch_impl(struct receive_writer_arg *rwa)
{
dnode_t *dn;
int err;
if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
return (SET_ERROR(EINVAL));
struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
last_drrw->drr_offset - first_drrw->drr_offset +
last_drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
dnode_rele(dn, FTAG);
return (err);
}
struct receive_record_arg *rrd;
while ((rrd = list_head(&rwa->write_batch)) != NULL) {
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
abd_t *abd = rrd->abd;
ASSERT3U(drrw->drr_object, ==, rwa->last_object);
if (drrw->drr_logical_size != dn->dn_datablksz) {
/*
* The WRITE record is larger than the object's block
* size. We must be receiving an incremental
* large-block stream into a dataset that previously did
* a non-large-block receive. Lightweight writes must
* be exactly one block, so we need to decompress the
* data (if compressed) and do a normal dmu_write().
*/
ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
if (DRR_WRITE_COMPRESSED(drrw)) {
abd_t *decomp_abd =
abd_alloc_linear(drrw->drr_logical_size,
B_FALSE);
err = zio_decompress_data(
drrw->drr_compressiontype,
abd, abd_to_buf(decomp_abd),
abd_get_size(abd),
abd_get_size(decomp_abd), NULL);
if (err == 0) {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(decomp_abd), tx);
}
abd_free(decomp_abd);
} else {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(abd), tx);
}
if (err == 0)
abd_free(abd);
} else {
zio_prop_t zp = {0};
dmu_write_policy(rwa->os, dn, 0, 0, &zp);
zio_flag_t zio_flags = 0;
if (rwa->raw) {
zp.zp_encrypt = B_TRUE;
zp.zp_compress = drrw->drr_compressiontype;
zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
rwa->byteswap;
memcpy(zp.zp_salt, drrw->drr_salt,
ZIO_DATA_SALT_LEN);
memcpy(zp.zp_iv, drrw->drr_iv,
ZIO_DATA_IV_LEN);
memcpy(zp.zp_mac, drrw->drr_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
zp.zp_nopwrite = B_FALSE;
zp.zp_copies = MIN(zp.zp_copies,
SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (DRR_WRITE_COMPRESSED(drrw)) {
ASSERT3U(drrw->drr_compressed_size, >, 0);
ASSERT3U(drrw->drr_logical_size, >=,
drrw->drr_compressed_size);
zp.zp_compress = drrw->drr_compressiontype;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
} else if (rwa->byteswap) {
/*
* Note: compressed blocks never need to be
* byteswapped, because WRITE records for
* metadata blocks are never compressed. The
* exception is raw streams, which are written
* in the original byteorder, and the byteorder
* bit is preserved in the BP by setting
* zp_byteorder above.
*/
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(
abd_to_buf(abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
/*
* Since this data can't be read until the receive
* completes, we can do a "lightweight" write for
* improved performance.
*/
err = dmu_lightweight_write_by_dnode(dn,
drrw->drr_offset, abd, &zp, zio_flags, tx);
}
if (err != 0) {
/*
* This rrd is left on the list, so the caller will
* free it (and the abd).
*/
break;
}
/*
* Note: If the receive fails, we want the resume stream to
* start with the same record that we last successfully
* received (as opposed to the next record), so that we can
* verify that we are resuming from the correct location.
*/
save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
list_remove(&rwa->write_batch, rrd);
kmem_free(rrd, sizeof (*rrd));
}
dmu_tx_commit(tx);
dnode_rele(dn, FTAG);
return (err);
}
noinline static int
flush_write_batch(struct receive_writer_arg *rwa)
{
if (list_is_empty(&rwa->write_batch))
return (0);
int err = rwa->err;
if (err == 0)
err = flush_write_batch_impl(rwa);
if (err != 0) {
struct receive_record_arg *rrd;
while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
abd_free(rrd->abd);
kmem_free(rrd, sizeof (*rrd));
}
}
ASSERT(list_is_empty(&rwa->write_batch));
return (err);
}
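/*
 * Handle one WRITE record. Healing receives attempt a corrective
 * rewrite of the corrupted block; all other receives queue the record
 * on rwa->write_batch, flushing the batch first if this record is for
 * a different object or lies more than batch_size bytes past the
 * batch's first offset. EAGAIN tells the caller the rrd is now owned
 * by the batch and must not be freed.
 */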
noinline static int
receive_process_write_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err = 0;
ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
return (SET_ERROR(EINVAL));
if (rwa->heal) {
blkptr_t *bp;
dmu_buf_t *dbp;
dnode_t *dn;
int flags = DB_RF_CANFAIL;
if (rwa->raw)
flags |= DB_RF_NO_DECRYPT;
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
drrw->drr_offset, FTAG, &dbp);
if (err != 0)
return (err);
/* Try to read the object to see if it needs healing */
err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
/*
* We only try to heal when dbuf_read() returns ECKSUM.
* Other errors (even EIO) are returned to the caller.
* EIO indicates that the device is not present/accessible,
* so writing to it will likely fail.
* If the block is healthy, we don't want to overwrite it
* unnecessarily.
*/
if (err != ECKSUM) {
dmu_buf_rele(dbp, FTAG);
return (err);
}
dn = dmu_buf_dnode_enter(dbp);
/* Make sure the on-disk block and recv record sizes match */
if (drrw->drr_logical_size !=
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT) {
err = ENOTSUP;
dmu_buf_dnode_exit(dbp);
dmu_buf_rele(dbp, FTAG);
return (err);
}
/* Get the block pointer for the corrupted block */
bp = dmu_buf_get_blkptr(dbp);
err = do_corrective_recv(rwa, drrw, rrd, bp);
dmu_buf_dnode_exit(dbp);
dmu_buf_rele(dbp, FTAG);
return (err);
}
/*
* For resuming to work, records must be in increasing order
* by (object, offset).
*/
if (drrw->drr_object < rwa->last_object ||
(drrw->drr_object == rwa->last_object &&
drrw->drr_offset < rwa->last_offset)) {
return (SET_ERROR(EINVAL));
}
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
uint64_t batch_size =
MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
if (first_rrd != NULL &&
(drrw->drr_object != first_drrw->drr_object ||
drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
err = flush_write_batch(rwa);
if (err != 0)
return (err);
}
rwa->last_object = drrw->drr_object;
rwa->last_offset = drrw->drr_offset;
if (rwa->last_object > rwa->max_object)
rwa->max_object = rwa->last_object;
list_insert_tail(&rwa->write_batch, rrd);
/*
* Return EAGAIN to indicate that we will use this rrd again,
* so the caller should not free it
*/
return (EAGAIN);
}
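/*
 * Apply a DRR_WRITE_EMBEDDED record, whose payload is small enough to
 * live directly inside a block pointer. Raw receives reject these
 * records, since embedded data cannot be received raw.
 */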
static int
receive_write_embedded(struct receive_writer_arg *rwa,
struct drr_write_embedded *drrwe, void *data)
{
dmu_tx_t *tx;
int err;
if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
return (SET_ERROR(EINVAL));
if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
return (SET_ERROR(EINVAL));
if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
return (SET_ERROR(EINVAL));
if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
return (SET_ERROR(EINVAL));
if (rwa->raw)
return (SET_ERROR(EINVAL));
if (drrwe->drr_object > rwa->max_object)
rwa->max_object = drrwe->drr_object;
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write(tx, drrwe->drr_object,
drrwe->drr_offset, drrwe->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
dmu_write_embedded(rwa->os, drrwe->drr_object,
drrwe->drr_offset, data, drrwe->drr_etype,
drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
/* See the comment about resume state in flush_write_batch_impl(). */
save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
dmu_tx_commit(tx);
return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
abd_t *abd)
{
dmu_buf_t *db, *db_spill;
int err;
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
return (SET_ERROR(EINVAL));
/*
* This is an unmodified spill block which was added to the stream
* to resolve an issue with incorrectly removing spill blocks. It
* should be ignored by current versions of the code which support
* the DRR_FLAG_SPILL_BLOCK flag.
*/
if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
abd_free(abd);
return (0);
}
if (rwa->raw) {
if (!DMU_OT_IS_VALID(drrs->drr_type) ||
drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
drrs->drr_compressed_size == 0)
return (SET_ERROR(EINVAL));
}
if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrs->drr_object > rwa->max_object)
rwa->max_object = drrs->drr_object;
VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
&db_spill)) != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_spill(tx, db->db_object);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_abort(tx);
return (err);
}
/*
* Spill blocks may both grow and shrink. When a change in size
* occurs any existing dbuf must be updated to match the logical
* size of the provided arc_buf_t.
*/
if (db_spill->db_size != drrs->drr_length) {
dmu_buf_will_fill(db_spill, tx);
VERIFY0(dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
}
arc_buf_t *abuf;
if (rwa->raw) {
boolean_t byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
rwa->byteswap;
abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
drrs->drr_object, byteorder, drrs->drr_salt,
drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
drrs->drr_compressed_size, drrs->drr_length,
drrs->drr_compressiontype, 0);
} else {
abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
DMU_OT_IS_METADATA(drrs->drr_type),
drrs->drr_length);
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrs->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
DRR_SPILL_PAYLOAD_SIZE(drrs));
}
}
memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_commit(tx);
return (0);
}
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
int err;
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
return (SET_ERROR(EINVAL));
if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrf->drr_object > rwa->max_object)
rwa->max_object = drrf->drr_object;
err = dmu_free_long_range(rwa->os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
return (err);
}
static int
receive_object_range(struct receive_writer_arg *rwa,
struct drr_object_range *drror)
{
/*
* By default, we assume this block is in our native format
* (ZFS_HOST_BYTEORDER). We then take into account whether
* the send stream is byteswapped (rwa->byteswap). Finally,
* we need to byteswap again if this particular block was
* in non-native format on the send side.
*/
boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
!!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
/*
* Since dnode block sizes are constant, we should not need to worry
* about making sure that the dnode block size is the same on the
* sending and receiving sides for the time being. For non-raw sends,
* this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
* record at all). Raw sends require this record type because the
* encryption parameters are used to protect an entire block of bonus
* buffers. If the size of dnode blocks ever becomes variable,
* handling will need to be added to ensure that dnode block sizes
* match on the sending and receiving side.
*/
if (drror->drr_numslots != DNODES_PER_BLOCK ||
P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
!rwa->raw)
return (SET_ERROR(EINVAL));
if (drror->drr_firstobj > rwa->max_object)
rwa->max_object = drror->drr_firstobj;
/*
* The DRR_OBJECT_RANGE handling must be deferred to receive_object()
* so that the block of dnodes is not written out when it's empty,
* and converted to a HOLE BP.
*/
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
rwa->or_need_sync = ORNS_MAYBE;
return (0);
}
/*
* Until we have the ability to redact large ranges of data efficiently, we
* process these records as frees.
*/
noinline static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
struct drr_free drrf = {0};
drrf.drr_length = drrr->drr_length;
drrf.drr_object = drrr->drr_object;
drrf.drr_offset = drrr->drr_offset;
drrf.drr_toguid = drrr->drr_toguid;
return (receive_free(rwa, &drrf));
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
dsl_dataset_t *ds = drc->drc_ds;
ds_hold_flags_t dsflags;
dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
/*
* Wait for the txg sync before cleaning up the receive. For
* resumable receives, this ensures that our resume state has
* been written out to disk. For raw receives, this ensures
* that the user accounting code will not attempt to do anything
* after we stopped receiving the dataset.
*/
txg_wait_synced(ds->ds_dir->dd_pool, 0);
ds->ds_objset->os_raw_receive = B_FALSE;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
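/*
 * Keep the partially received state around if a future resume is
 * possible; otherwise disown and destroy the dataset (unless this was
 * a healing receive, which operates on an existing snapshot).
 */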
if (drc->drc_resumable && drc->drc_should_save &&
!BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
} else {
char name[ZFS_MAX_DATASET_NAME_LEN];
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_name(ds, name);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
if (!drc->drc_heal)
(void) dsl_destroy_head(name);
}
}
static void
receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
{
if (drc->drc_byteswap) {
(void) fletcher_4_incremental_byteswap(buf, len,
&drc->drc_cksum);
} else {
(void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
}
}
/*
* Read the payload into a buffer of size len, and update the current record's
* payload field.
* Allocate drc->drc_next_rrd and read the next record's header into
* drc->drc_next_rrd->header.
* Verify checksum of payload and next record.
*/
static int
receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
{
int err;
if (len != 0) {
ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
err = receive_read(drc, len, buf);
if (err != 0)
return (err);
receive_cksum(drc, len, buf);
/* note: rrd is NULL when reading the begin record's payload */
if (drc->drc_rrd != NULL) {
drc->drc_rrd->payload = buf;
drc->drc_rrd->payload_size = len;
drc->drc_rrd->bytes_read = drc->drc_bytes_read;
}
} else {
ASSERT3P(buf, ==, NULL);
}
drc->drc_prev_cksum = drc->drc_cksum;
drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
&drc->drc_next_rrd->header);
drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (err);
}
if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(EINVAL));
}
/*
* Note: checksum is of everything up to but not including the
* checksum itself.
*/
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
receive_cksum(drc,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&drc->drc_next_rrd->header);
zio_cksum_t cksum_orig =
drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
zio_cksum_t *cksump =
&drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
if (drc->drc_byteswap)
byteswap_record(&drc->drc_next_rrd->header);
if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
!ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(ECKSUM));
}
receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
return (0);
}
/*
* Issue the prefetch reads for any necessary indirect blocks.
*
* We use the object ignore list to tell us whether or not to issue prefetches
* for a given object. We do this for both correctness (in case the blocksize
* of an object has changed) and performance (if the object doesn't exist, don't
* needlessly try to issue prefetches). We also trim the list as we go through
* the stream to prevent it from growing to an unbounded size.
*
* The object numbers within will always be in sorted order, and any write
* records we see will also be in sorted order, but they're not sorted with
* respect to each other (i.e. we can get several object records before
* receiving each object's write records). As a result, once we've reached a
* given object number, we can safely remove any reference to lower object
* numbers in the ignore list. In practice, we receive up to 32 object records
* before receiving write records, so the list can have up to 32 nodes in it.
*/
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
uint64_t length)
{
if (!objlist_exists(drc->drc_ignore_objlist, object)) {
dmu_prefetch(drc->drc_os, object, 1, offset, length,
ZIO_PRIORITY_SYNC_READ);
}
}
/*
* Read records off the stream, issuing any necessary prefetches.
*/
static int
receive_read_record(dmu_recv_cookie_t *drc)
{
int err;
switch (drc->drc_rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro =
&drc->drc_rrd->header.drr_u.drr_object;
uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
void *buf = NULL;
dmu_object_info_t doi;
if (size != 0)
buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
/*
* See receive_read_prefetch for an explanation why we're
* storing this object in the ignore_obj_list.
*/
if (err == ENOENT || err == EEXIST ||
(err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
objlist_insert(drc->drc_ignore_objlist,
drro->drr_object);
err = 0;
}
return (err);
}
case DRR_FREEOBJECTS:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_WRITE:
{
struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0) {
abd_free(abd);
return (err);
}
drc->drc_rrd->abd = abd;
receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
drrw->drr_logical_size);
return (err);
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&drc->drc_rrd->header.drr_u.drr_write_embedded;
uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
void *buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
drrwe->drr_length);
return (err);
}
case DRR_FREE:
case DRR_REDACT:
{
/*
* It might be beneficial to prefetch indirect blocks here, but
* we don't really have the data to decide for sure.
*/
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_END:
{
struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
drre->drr_checksum))
return (SET_ERROR(ECKSUM));
return (0);
}
case DRR_SPILL:
{
struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0)
abd_free(abd);
else
drc->drc_rrd->abd = abd;
return (err);
}
case DRR_OBJECT_RANGE:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
default:
return (SET_ERROR(EINVAL));
}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
dprintf("drr_type = OBJECT obj = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
"compress = %u dn_slots = %u err = %d\n",
(u_longlong_t)drro->drr_object, drro->drr_type,
drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
drro->drr_checksumtype, drro->drr_compress,
drro->drr_dn_slots, err);
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
dprintf("drr_type = FREEOBJECTS firstobj = %llu "
"numobjs = %llu err = %d\n",
(u_longlong_t)drrfo->drr_firstobj,
(u_longlong_t)drrfo->drr_numobjs, err);
break;
}
case DRR_WRITE:
{
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
"lsize = %llu cksumtype = %u flags = %u "
"compress = %u psize = %llu err = %d\n",
(u_longlong_t)drrw->drr_object, drrw->drr_type,
(u_longlong_t)drrw->drr_offset,
(u_longlong_t)drrw->drr_logical_size,
drrw->drr_checksumtype, drrw->drr_flags,
drrw->drr_compressiontype,
(u_longlong_t)drrw->drr_compressed_size, err);
break;
}
case DRR_WRITE_BYREF:
{
struct drr_write_byref *drrwbr =
&rrd->header.drr_u.drr_write_byref;
dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
"length = %llu toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu cksumtype = %u "
"flags = %u err = %d\n",
(u_longlong_t)drrwbr->drr_object,
(u_longlong_t)drrwbr->drr_offset,
(u_longlong_t)drrwbr->drr_length,
(u_longlong_t)drrwbr->drr_toguid,
(u_longlong_t)drrwbr->drr_refguid,
(u_longlong_t)drrwbr->drr_refobject,
(u_longlong_t)drrwbr->drr_refoffset,
drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
"length = %llu compress = %u etype = %u lsize = %u "
"psize = %u err = %d\n",
(u_longlong_t)drrwe->drr_object,
(u_longlong_t)drrwe->drr_offset,
(u_longlong_t)drrwe->drr_length,
drrwe->drr_compression, drrwe->drr_etype,
drrwe->drr_lsize, drrwe->drr_psize, err);
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
dprintf("drr_type = FREE obj = %llu offset = %llu "
"length = %lld err = %d\n",
(u_longlong_t)drrf->drr_object,
(u_longlong_t)drrf->drr_offset,
(longlong_t)drrf->drr_length,
err);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
dprintf("drr_type = SPILL obj = %llu length = %llu "
"err = %d\n", (u_longlong_t)drrs->drr_object,
(u_longlong_t)drrs->drr_length, err);
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u err = %d\n",
(u_longlong_t)drror->drr_firstobj,
(u_longlong_t)drror->drr_numslots,
drror->drr_flags, err);
break;
}
default:
return;
}
#endif
}
/*
* Commit the records to the pool.
*/
static int
receive_process_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err;
/* Processing in order, therefore bytes_read should be increasing. */
ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
rwa->bytes_read = rrd->bytes_read;
/* We can only heal write records; other record types are ignored. */
if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (0);
}
if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
err = flush_write_batch(rwa);
if (err != 0) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (err);
}
}
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
err = receive_object(rwa, drro, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
err = receive_freeobjects(rwa, drrfo);
break;
}
case DRR_WRITE:
{
err = receive_process_write_record(rwa, rrd);
if (rwa->heal) {
/*
* If healing, always free the abd after processing.
*/
abd_free(rrd->abd);
rrd->abd = NULL;
} else if (err != EAGAIN) {
/*
* On success, a non-healing
* receive_process_write_record() returns
* EAGAIN to indicate that we do not want to free
* the rrd or its abd.
*/
ASSERT(err != 0);
abd_free(rrd->abd);
rrd->abd = NULL;
}
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
err = receive_write_embedded(rwa, drrwe, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
err = receive_free(rwa, drrf);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
err = receive_spill(rwa, drrs, rrd->abd);
if (err != 0)
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
err = receive_object_range(rwa, drror);
break;
}
case DRR_REDACT:
{
struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
err = receive_redact(rwa, drrr);
break;
}
default:
err = SET_ERROR(EINVAL);
}
if (err != 0)
dprintf_drr(rrd, err);
return (err);
}
/*
* dmu_recv_stream's worker thread; pull records off the queue and call
* receive_process_record. When we're done, signal the main thread and exit.
*/
static __attribute__((noreturn)) void
receive_writer_thread(void *arg)
{
struct receive_writer_arg *rwa = arg;
struct receive_record_arg *rrd;
fstrans_cookie_t cookie = spl_fstrans_mark();
for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
rrd = bqueue_dequeue(&rwa->q)) {
/*
* If there's an error, the main thread will stop putting things
* on the queue, but we need to clear everything in it before we
* can exit.
*/
int err = 0;
if (rwa->err == 0) {
err = receive_process_record(rwa, rrd);
} else if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
/*
* EAGAIN indicates that this record has been saved (on
* rwa->write_batch), and will be used again, so we don't
* free it.
* When healing data we always need to free the record.
*/
if (err != EAGAIN || rwa->heal) {
if (rwa->err == 0)
rwa->err = err;
kmem_free(rrd, sizeof (*rrd));
}
}
kmem_free(rrd, sizeof (*rrd));
if (rwa->heal) {
zio_wait(rwa->heal_pio);
} else {
int err = flush_write_batch(rwa);
if (rwa->err == 0)
rwa->err = err;
}
mutex_enter(&rwa->mutex);
rwa->done = B_TRUE;
cv_signal(&rwa->cv);
mutex_exit(&rwa->mutex);
spl_fstrans_unmark(cookie);
thread_exit();
}
static int
resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
{
uint64_t val;
objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
uint64_t dsobj = dmu_objset_id(drc->drc_os);
uint64_t resume_obj, resume_off;
if (nvlist_lookup_uint64(begin_nvl,
"resume_object", &resume_obj) != 0 ||
nvlist_lookup_uint64(begin_nvl,
"resume_offset", &resume_off) != 0) {
return (SET_ERROR(EINVAL));
}
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
if (resume_obj != val)
return (SET_ERROR(EINVAL));
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
if (resume_off != val)
return (SET_ERROR(EINVAL));
return (0);
}
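/*
 * Example of the begin-payload pairs checked above (values hypothetical;
 * the sender of a resumed stream derives them from the dataset's
 * receive_resume_token):
 *
 *	resume_object = 0x1c5	must match ZAP DS_FIELD_RESUME_OBJECT
 *	resume_offset = 0x40000	must match ZAP DS_FIELD_RESUME_OFFSET
 *
 * A mismatch means the stream was generated for a different resume point
 * than the one recorded on the dataset, so we refuse it (EINVAL).
 */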
/*
* Read in the stream's records, one by one, and apply them to the pool. There
* are two threads involved; the thread that calls this function will spin up a
* worker thread, read the records off the stream one by one, and issue
* prefetches for any necessary indirect blocks. It will then push the records
* onto an internal blocking queue. The worker thread will pull the records off
* the queue, and actually write the data into the DMU. This way, the worker
* thread doesn't have to wait for reads to complete, since everything it needs
* (the indirect blocks) will be prefetched.
*
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
int
dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
int err = 0;
struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
uint64_t bytes = 0;
(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
sizeof (bytes), 1, &bytes);
drc->drc_bytes_read += bytes;
}
drc->drc_ignore_objlist = objlist_create();
/* these were verified in dmu_recv_begin */
ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
DMU_SUBSTREAM);
ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
ASSERT0(drc->drc_os->os_encrypted &&
(drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
/* handle DSL encryption key payload */
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
nvlist_t *keynvl = NULL;
ASSERT(drc->drc_os->os_encrypted);
ASSERT(drc->drc_raw);
err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
&keynvl);
if (err != 0)
goto out;
if (!drc->drc_heal) {
/*
* If this is a new dataset we set the key immediately.
* Otherwise we don't want to change the key until we
* are sure the rest of the receive succeeded, so we
* stash the keynvl away until then.
*/
err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
drc->drc_ds->ds_object, drc->drc_fromsnapobj,
drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
if (err != 0)
goto out;
}
/* see comment in dmu_recv_end_sync() */
drc->drc_ivset_guid = 0;
(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
&drc->drc_ivset_guid);
if (!drc->drc_newfs)
drc->drc_keynvl = fnvlist_dup(keynvl);
}
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = resume_check(drc, drc->drc_begin_nvl);
if (err != 0)
goto out;
}
/*
* For compatibility with recursive send streams, we do this here,
* rather than in dmu_recv_begin. If we pull the next header too
* early, and it's the END record, we break the `recv_skip` logic.
*/
if (drc->drc_drr_begin->drr_payloadlen == 0) {
err = receive_read_payload_and_next_header(drc, 0, NULL);
if (err != 0)
goto out;
}
/*
* If we failed before this point we will clean up any new resume
* state that was created. Now that we've gotten past the initial
* checks we are ok to retain that resume state.
*/
drc->drc_should_save = B_TRUE;
(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
offsetof(struct receive_record_arg, node));
cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
rwa->os = drc->drc_os;
rwa->byteswap = drc->drc_byteswap;
rwa->heal = drc->drc_heal;
rwa->tofs = drc->drc_tofs;
rwa->resumable = drc->drc_resumable;
rwa->raw = drc->drc_raw;
rwa->spill = drc->drc_spill;
rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
rwa->os->os_raw_receive = drc->drc_raw;
if (drc->drc_heal) {
rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
ZIO_FLAG_GODFATHER);
}
list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
offsetof(struct receive_record_arg, node.bqn_node));
(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
TS_RUN, minclsyspri);
/*
* We're reading rwa->err without locks, which is safe since we are the
* only reader, and the worker thread is the only writer. It's ok if we
* miss a write for an iteration or two of the loop, since the writer
* thread will keep freeing records we send it until we send it an eos
* marker.
*
* We can leave this loop in 3 ways: First, if rwa->err is
* non-zero. In that case, the writer thread will free the rrd we just
* pushed. Second, if we're interrupted; in that case, either it's the
* first loop and drc->drc_rrd was never allocated, or it's later, and
* drc->drc_rrd has been handed off to the writer thread who will free
* it. Finally, if receive_read_record fails or we're at the end of the
* stream, then we free drc->drc_rrd and exit.
*/
while (rwa->err == 0) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
err = SET_ERROR(EINTR);
break;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = drc->drc_next_rrd;
drc->drc_next_rrd = NULL;
/* Allocates and loads header into drc->drc_next_rrd */
err = receive_read_record(drc);
if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
drc->drc_rrd = NULL;
break;
}
bqueue_enqueue(&rwa->q, drc->drc_rrd,
sizeof (struct receive_record_arg) +
drc->drc_rrd->payload_size);
drc->drc_rrd = NULL;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
drc->drc_rrd->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
mutex_enter(&rwa->mutex);
while (!rwa->done) {
/*
* We need to use cv_wait_sig() so that any process that may
* be sleeping here can still fork.
*/
(void) cv_wait_sig(&rwa->cv, &rwa->mutex);
}
mutex_exit(&rwa->mutex);
/*
* If we are receiving a full stream as a clone, all object IDs which
* are greater than the maximum ID referenced in the stream are
* by definition unused and must be freed.
*/
if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
uint64_t obj = rwa->max_object + 1;
int free_err = 0;
int next_err = 0;
while (next_err == 0) {
free_err = dmu_free_long_object(rwa->os, obj);
if (free_err != 0 && free_err != ENOENT)
break;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
}
if (err == 0) {
if (free_err != 0 && free_err != ENOENT)
err = free_err;
else if (next_err != ESRCH)
err = next_err;
}
}
cv_destroy(&rwa->cv);
mutex_destroy(&rwa->mutex);
bqueue_destroy(&rwa->q);
list_destroy(&rwa->write_batch);
if (err == 0)
err = rwa->err;
out:
/*
* If we hit an error before we started the receive_writer_thread,
* we need to clean up the next_rrd we created while processing the
* DRR_BEGIN record.
*/
if (drc->drc_next_rrd != NULL)
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
/*
* The objset will be invalidated by dmu_recv_end() when we do
* dsl_dataset_clone_swap_sync_impl().
*/
drc->drc_os = NULL;
kmem_free(rwa, sizeof (*rwa));
nvlist_free(drc->drc_begin_nvl);
if (err != 0) {
/*
* Clean up references. If the receive is not resumable,
* destroy what we created, so we don't leave it in an
* inconsistent state.
*/
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
}
objlist_destroy(drc->drc_ignore_objlist);
drc->drc_ignore_objlist = NULL;
*voffp = drc->drc_voff;
return (err);
}
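/*
 * Shape of the two-thread hand-off implemented above (a sketch; the
 * bqueue_* and cv_* calls are the ones used in this function):
 *
 *	main thread (this function)       receive_writer_thread()
 *	---------------------------       ------------------------------
 *	receive_read_record()             rrd = bqueue_dequeue(&rwa->q)
 *	issue prefetches                  receive_process_record()
 *	bqueue_enqueue(&rwa->q, rrd)      ...
 *	...                               ...
 *	bqueue_enqueue_flush(eos rrd)     sees eos_marker, sets rwa->done,
 *	cv_wait_sig(&rwa->cv)             cv_signal(&rwa->cv), thread_exit()
 *
 * The blocking queue provides the back-pressure: the reader stalls in
 * bqueue_enqueue() once the queue holds its configured number of bytes.
 */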
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
int error;
ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
if (drc->drc_heal) {
error = 0;
} else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
if (error != 0)
return (error);
if (drc->drc_force) {
/*
* We will destroy any snapshots in tofs (i.e. before
* origin_head) that are after the origin (which is
* the snap before drc_ds, because drc_ds cannot
* have any snaps of its own).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
break;
if (snap->ds_dir != origin_head->ds_dir)
error = SET_ERROR(EINVAL);
if (error == 0) {
error = dsl_destroy_snapshot_check_impl(
snap, B_FALSE);
}
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
if (error != 0)
break;
}
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
if (drc->drc_keynvl != NULL) {
error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
drc->drc_keynvl, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
origin_head, drc->drc_force, drc->drc_owner, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
error = dsl_dataset_snapshot_check_impl(origin_head,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
dsl_dataset_rele(origin_head, FTAG);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
} else {
error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
}
return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
uint64_t newsnapobj = 0;
spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
tx, "snap=%s", drc->drc_tosnap);
drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
if (drc->drc_heal) {
if (drc->drc_keynvl != NULL) {
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
} else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
&origin_head));
if (drc->drc_force) {
/*
* Destroy any snapshots of drc_tofs (origin_head)
* after the origin (the snap before drc_ds).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
&snap));
ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_destroy_snapshot_sync_impl(snap,
B_FALSE, tx);
dsl_dataset_rele(snap, FTAG);
}
}
if (drc->drc_keynvl != NULL) {
dsl_crypto_recv_raw_key_sync(drc->drc_ds,
drc->drc_keynvl, tx);
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
VERIFY3P(drc->drc_ds->ds_prev, ==,
origin_head->ds_prev);
dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
origin_head, tx);
/*
* The objset was evicted by dsl_dataset_clone_swap_sync_impl,
* so drc_os is no longer valid.
*/
drc->drc_os = NULL;
dsl_dataset_snapshot_sync_impl(origin_head,
drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
dsl_dataset_phys(origin_head)->ds_flags &=
~DS_FLAG_INCONSISTENT;
newsnapobj =
dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
dsl_dataset_rele(origin_head, FTAG);
dsl_destroy_head_sync_impl(drc->drc_ds, tx);
if (drc->drc_owner != NULL)
VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
} else {
dsl_dataset_t *ds = drc->drc_ds;
dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(ds->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(ds->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
if (dsl_dataset_has_resume_receive_state(ds)) {
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
}
newsnapobj =
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
}
/*
* If this is a raw receive, the crypt_keydata nvlist will include
* a to_ivset_guid for us to set on the new snapshot. This value
* will override the value generated by the snapshot code. However,
* this value may not be present, because older implementations of
* the raw send code did not include it. We are still allowed to
* receive such streams if the zfs_disable_ivset_guid_check tunable
* is set, in which case we keep the newly generated value.
*/
if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
&drc->drc_ivset_guid, tx));
}
/*
* Release the hold from dmu_recv_begin. This must be done before
* we return to open context, so that when we free the dataset's dnode
* we can evict its bonus buffer. Since the dataset may be destroyed
* at this point (and therefore won't have a valid pointer to the spa)
* we release the key mapping manually here while we do have a valid
* pointer, if it exists.
*/
if (!drc->drc_raw && encrypted) {
(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
drc->drc_ds->ds_object, drc->drc_ds);
}
dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
drc->drc_ds = NULL;
}
static int dmu_recv_end_modified_blocks = 3;
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
/*
* We will be destroying the ds; make sure its origin is unmounted if
* necessary.
*/
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(drc->drc_ds, name);
zfs_destroy_unmount_origin(name);
#endif
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
int error;
drc->drc_owner = owner;
if (drc->drc_newfs)
error = dmu_recv_new_end(drc);
else
error = dmu_recv_existing_end(drc);
if (error != 0) {
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
} else if (!drc->drc_heal) {
if (drc->drc_newfs) {
zvol_create_minor(drc->drc_tofs);
}
char *snapname = kmem_asprintf("%s@%s",
drc->drc_tofs, drc->drc_tosnap);
zvol_create_minor(snapname);
kmem_strfree(snapname);
}
return (error);
}
/*
* Return TRUE if this objset is currently being received into.
*/
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
return (os->os_dsl_dataset != NULL &&
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
"Maximum receive queue length");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
"Receive queue fill fraction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
"Maximum amount of writes to batch into one transaction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
"Ignore errors during corrective receive");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
index b70459380c24..d0acaf502066 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
@@ -1,585 +1,587 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/arc_impl.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>
/*
* This tunable disables predictive prefetch. Note that it leaves "prescient"
* prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
* prescient prefetch never issues i/os that end up not being needed,
* so it can't hurt performance.
*/
static int zfs_prefetch_disable = B_FALSE;
/* max # of streams per zfetch */
static unsigned int zfetch_max_streams = 8;
/* min time before stream reclaim */
static unsigned int zfetch_min_sec_reap = 1;
/* max time before stream delete */
static unsigned int zfetch_max_sec_reap = 2;
+#ifdef _ILP32
+/* min bytes to prefetch per stream (default 2MB) */
+static unsigned int zfetch_min_distance = 2 * 1024 * 1024;
+/* max bytes to prefetch per stream (default 8MB) */
+unsigned int zfetch_max_distance = 8 * 1024 * 1024;
+#else
/* min bytes to prefetch per stream (default 4MB) */
static unsigned int zfetch_min_distance = 4 * 1024 * 1024;
/* max bytes to prefetch per stream (default 64MB) */
unsigned int zfetch_max_distance = 64 * 1024 * 1024;
+#endif
/* max bytes to prefetch indirects for per stream (default 64MB) */
unsigned int zfetch_max_idistance = 64 * 1024 * 1024;
-/* max number of bytes in an array_read in which we allow prefetching (1MB) */
-uint64_t zfetch_array_rd_sz = 1024 * 1024;
typedef struct zfetch_stats {
kstat_named_t zfetchstat_hits;
kstat_named_t zfetchstat_misses;
kstat_named_t zfetchstat_max_streams;
kstat_named_t zfetchstat_io_issued;
kstat_named_t zfetchstat_io_active;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "max_streams", KSTAT_DATA_UINT64 },
{ "io_issued", KSTAT_DATA_UINT64 },
{ "io_active", KSTAT_DATA_UINT64 },
};
struct {
wmsum_t zfetchstat_hits;
wmsum_t zfetchstat_misses;
wmsum_t zfetchstat_max_streams;
wmsum_t zfetchstat_io_issued;
aggsum_t zfetchstat_io_active;
} zfetch_sums;
#define ZFETCHSTAT_BUMP(stat) \
wmsum_add(&zfetch_sums.stat, 1)
#define ZFETCHSTAT_ADD(stat, val) \
wmsum_add(&zfetch_sums.stat, val)
static kstat_t *zfetch_ksp;
static int
zfetch_kstats_update(kstat_t *ksp, int rw)
{
zfetch_stats_t *zs = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
zs->zfetchstat_hits.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_hits);
zs->zfetchstat_misses.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_misses);
zs->zfetchstat_max_streams.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_max_streams);
zs->zfetchstat_io_issued.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_io_issued);
zs->zfetchstat_io_active.value.ui64 =
aggsum_value(&zfetch_sums.zfetchstat_io_active);
return (0);
}
void
zfetch_init(void)
{
wmsum_init(&zfetch_sums.zfetchstat_hits, 0);
wmsum_init(&zfetch_sums.zfetchstat_misses, 0);
wmsum_init(&zfetch_sums.zfetchstat_max_streams, 0);
wmsum_init(&zfetch_sums.zfetchstat_io_issued, 0);
aggsum_init(&zfetch_sums.zfetchstat_io_active, 0);
zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zfetch_ksp != NULL) {
zfetch_ksp->ks_data = &zfetch_stats;
zfetch_ksp->ks_update = zfetch_kstats_update;
kstat_install(zfetch_ksp);
}
}
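/*
 * Observability sketch (paths are the conventional kstat locations and an
 * assumption here): the counters installed above can be read with
 *
 *	FreeBSD:  sysctl kstat.zfs.misc.zfetchstats
 *	Linux:    cat /proc/spl/kstat/zfs/zfetchstats
 *
 * hits/misses measure stream prediction accuracy; io_active is the number
 * of prefetch I/Os currently in flight.
 */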
void
zfetch_fini(void)
{
if (zfetch_ksp != NULL) {
kstat_delete(zfetch_ksp);
zfetch_ksp = NULL;
}
wmsum_fini(&zfetch_sums.zfetchstat_hits);
wmsum_fini(&zfetch_sums.zfetchstat_misses);
wmsum_fini(&zfetch_sums.zfetchstat_max_streams);
wmsum_fini(&zfetch_sums.zfetchstat_io_issued);
ASSERT0(aggsum_value(&zfetch_sums.zfetchstat_io_active));
aggsum_fini(&zfetch_sums.zfetchstat_io_active);
}
/*
* This takes a pointer to a zfetch structure and a dnode. It performs the
* necessary setup for the zfetch structure, grokking data from the
* associated dnode.
*/
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
if (zf == NULL)
return;
zf->zf_dnode = dno;
zf->zf_numstreams = 0;
list_create(&zf->zf_stream, sizeof (zstream_t),
offsetof(zstream_t, zs_node));
mutex_init(&zf->zf_lock, NULL, MUTEX_DEFAULT, NULL);
}
static void
dmu_zfetch_stream_fini(zstream_t *zs)
{
ASSERT(!list_link_active(&zs->zs_node));
zfs_refcount_destroy(&zs->zs_callers);
zfs_refcount_destroy(&zs->zs_refs);
kmem_free(zs, sizeof (*zs));
}
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
ASSERT(MUTEX_HELD(&zf->zf_lock));
list_remove(&zf->zf_stream, zs);
zf->zf_numstreams--;
membar_producer();
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
}
/*
* Clean-up state associated with a zfetch structure (e.g. destroy the
* streams). This doesn't free the zfetch_t itself; that's left to the caller.
*/
void
dmu_zfetch_fini(zfetch_t *zf)
{
zstream_t *zs;
mutex_enter(&zf->zf_lock);
while ((zs = list_head(&zf->zf_stream)) != NULL)
dmu_zfetch_stream_remove(zf, zs);
mutex_exit(&zf->zf_lock);
list_destroy(&zf->zf_stream);
mutex_destroy(&zf->zf_lock);
zf->zf_dnode = NULL;
}
/*
* If there aren't too many active streams already, create one more.
* While at it, delete or reuse all streams that have had no hits for
* zfetch_max_sec_reap. If one is still needed, reuse the oldest stream
* that has had no hits for zfetch_min_sec_reap (or that never had one).
* The "blkid" argument is the next block that we expect this stream to access.
*/
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
zstream_t *zs, *zs_next, *zs_old = NULL;
hrtime_t now = gethrtime(), t;
ASSERT(MUTEX_HELD(&zf->zf_lock));
/*
* Delete too old streams, reusing the first found one.
*/
t = now - SEC2NSEC(zfetch_max_sec_reap);
for (zs = list_head(&zf->zf_stream); zs != NULL; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs);
/*
* Skip if still active; a count of 1 is just the zf_stream reference.
*/
if (zfs_refcount_count(&zs->zs_refs) != 1)
continue;
if (zs->zs_atime > t)
continue;
if (zs_old)
dmu_zfetch_stream_remove(zf, zs);
else
zs_old = zs;
}
if (zs_old) {
zs = zs_old;
goto reuse;
}
/*
* The maximum number of streams is normally zfetch_max_streams,
* but for small files we lower it such that it's at least possible
* for all the streams to be non-overlapping.
*/
uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
zfetch_max_distance));
if (zf->zf_numstreams >= max_streams) {
t = now - SEC2NSEC(zfetch_min_sec_reap);
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (zfs_refcount_count(&zs->zs_refs) != 1)
continue;
if (zs->zs_atime > t)
continue;
if (zs_old == NULL || zs->zs_atime < zs_old->zs_atime)
zs_old = zs;
}
if (zs_old) {
zs = zs_old;
goto reuse;
}
ZFETCHSTAT_BUMP(zfetchstat_max_streams);
return;
}
zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
zs->zs_fetch = zf;
zfs_refcount_create(&zs->zs_callers);
zfs_refcount_create(&zs->zs_refs);
/* One reference for zf_stream. */
zfs_refcount_add(&zs->zs_refs, NULL);
zf->zf_numstreams++;
list_insert_head(&zf->zf_stream, zs);
reuse:
zs->zs_blkid = blkid;
zs->zs_pf_dist = 0;
zs->zs_pf_start = blkid;
zs->zs_pf_end = blkid;
zs->zs_ipf_dist = 0;
zs->zs_ipf_start = blkid;
zs->zs_ipf_end = blkid;
/* Allow immediate stream reuse until first hit. */
zs->zs_atime = now - SEC2NSEC(zfetch_min_sec_reap);
zs->zs_missed = B_FALSE;
zs->zs_more = B_FALSE;
}
static void
dmu_zfetch_done(void *arg, uint64_t level, uint64_t blkid, boolean_t io_issued)
{
zstream_t *zs = arg;
if (io_issued && level == 0 && blkid < zs->zs_blkid)
zs->zs_more = B_TRUE;
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
aggsum_add(&zfetch_sums.zfetchstat_io_active, -1);
}
/*
* This is the predictive prefetch entry point. dmu_zfetch_prepare()
* associates the dnode access specified by the blkid and nblks arguments
* with a prefetch stream, predicts further accesses based on that stream's
* statistics, and returns the stream pointer on success. That pointer must
* later be passed to dmu_zfetch_run() to initiate the speculative prefetch
* for the stream and release it. dmu_zfetch() is a wrapper for the simple
* cases in which no window between prediction and prefetch initiation is
* needed.
* fetch_data argument specifies whether actual data blocks should be fetched:
* FALSE -- prefetch only indirect blocks for predicted data blocks;
* TRUE -- prefetch predicted data blocks plus following indirect blocks.
*/
zstream_t *
dmu_zfetch_prepare(zfetch_t *zf, uint64_t blkid, uint64_t nblks,
boolean_t fetch_data, boolean_t have_lock)
{
zstream_t *zs;
spa_t *spa = zf->zf_dnode->dn_objset->os_spa;
if (zfs_prefetch_disable)
return (NULL);
/*
* If we haven't yet loaded the indirect vdevs' mappings, we
* can only read from blocks that we carefully ensure are on
* concrete vdevs (or previously-loaded indirect vdevs). So we
* can't allow the predictive prefetcher to attempt reads of other
* blocks (e.g. of the MOS's dnode object).
*/
if (!spa_indirect_vdevs_loaded(spa))
return (NULL);
/*
* As a fast path for small (single-block) files, ignore access
* to the first block.
*/
if (!have_lock && blkid == 0)
return (NULL);
if (!have_lock)
rw_enter(&zf->zf_dnode->dn_struct_rwlock, RW_READER);
/*
* A fast path for small files for which no prefetch will
* happen.
*/
uint64_t maxblkid = zf->zf_dnode->dn_maxblkid;
if (maxblkid < 2) {
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
mutex_enter(&zf->zf_lock);
/*
* Find a matching prefetch stream. Depending on whether the accesses
* are block-aligned, the first block of the new access may either
* follow the last block of the previous access or be equal to it.
*/
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (blkid == zs->zs_blkid) {
break;
} else if (blkid + 1 == zs->zs_blkid) {
blkid++;
nblks--;
break;
}
}
/*
* If the file is ending, remove the matching stream if found.
* If not found then it is too late to create a new one now.
*/
uint64_t end_of_access_blkid = blkid + nblks;
if (end_of_access_blkid >= maxblkid) {
if (zs != NULL)
dmu_zfetch_stream_remove(zf, zs);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
/* Exit if we already prefetched this block before. */
if (nblks == 0) {
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
if (zs == NULL) {
/*
* This access is not part of any existing stream. Create
* a new stream for it.
*/
dmu_zfetch_stream_create(zf, end_of_access_blkid);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
ZFETCHSTAT_BUMP(zfetchstat_misses);
return (NULL);
}
/*
* This access was to a block that we issued a prefetch for on
* behalf of this stream. Calculate further prefetch distances.
*
* Start the prefetch distance at the demand access size (nblks).
* Double it on every access up to zfetch_min_distance. After that,
* grow it by 1/8 per access, and only when needed, up to
* zfetch_max_distance.
*
* Don't double the distance beyond single block if we have more
* than ~6% of ARC held by active prefetches. It should help with
* getting out of RAM on some badly mispredicted read patterns.
*/
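/*
 * Worked example (assuming a 128K recordsize and the non-ILP32 defaults
 * above): sequential 128K reads grow zs_pf_dist as 128K, 256K, 512K,
 * 1M, 2M, 4M (= zfetch_min_distance). Past that it grows by 1/8 per
 * access, and only while zs_more indicates the prefetch is still running
 * behind the demand reads, capped at zfetch_max_distance (64M).
 */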
unsigned int dbs = zf->zf_dnode->dn_datablkshift;
unsigned int nbytes = nblks << dbs;
unsigned int pf_nblks;
if (fetch_data) {
if (unlikely(zs->zs_pf_dist < nbytes))
zs->zs_pf_dist = nbytes;
else if (zs->zs_pf_dist < zfetch_min_distance &&
(zs->zs_pf_dist < (1 << dbs) ||
aggsum_compare(&zfetch_sums.zfetchstat_io_active,
arc_c_max >> (4 + dbs)) < 0))
zs->zs_pf_dist *= 2;
else if (zs->zs_more)
zs->zs_pf_dist += zs->zs_pf_dist / 8;
zs->zs_more = B_FALSE;
if (zs->zs_pf_dist > zfetch_max_distance)
zs->zs_pf_dist = zfetch_max_distance;
pf_nblks = zs->zs_pf_dist >> dbs;
} else {
pf_nblks = 0;
}
if (zs->zs_pf_start < end_of_access_blkid)
zs->zs_pf_start = end_of_access_blkid;
if (zs->zs_pf_end < end_of_access_blkid + pf_nblks)
zs->zs_pf_end = end_of_access_blkid + pf_nblks;
/*
* Do the same for indirects, starting where we will stop reading
* data blocks (and the indirects that point to them).
*/
if (unlikely(zs->zs_ipf_dist < nbytes))
zs->zs_ipf_dist = nbytes;
else
zs->zs_ipf_dist *= 2;
if (zs->zs_ipf_dist > zfetch_max_idistance)
zs->zs_ipf_dist = zfetch_max_idistance;
pf_nblks = zs->zs_ipf_dist >> dbs;
if (zs->zs_ipf_start < zs->zs_pf_end)
zs->zs_ipf_start = zs->zs_pf_end;
if (zs->zs_ipf_end < zs->zs_pf_end + pf_nblks)
zs->zs_ipf_end = zs->zs_pf_end + pf_nblks;
zs->zs_blkid = end_of_access_blkid;
/* Protect the stream from reclamation. */
zs->zs_atime = gethrtime();
zfs_refcount_add(&zs->zs_refs, NULL);
/* Count concurrent callers. */
zfs_refcount_add(&zs->zs_callers, NULL);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
ZFETCHSTAT_BUMP(zfetchstat_hits);
return (zs);
}
void
dmu_zfetch_run(zstream_t *zs, boolean_t missed, boolean_t have_lock)
{
zfetch_t *zf = zs->zs_fetch;
int64_t pf_start, pf_end, ipf_start, ipf_end;
int epbs, issued;
if (missed)
zs->zs_missed = missed;
/*
* Postpone the prefetch if there are more concurrent callers.
* This happens when multiple requests are waiting for the same
* indirect block. The last one will run the prefetch for all.
*/
if (zfs_refcount_remove(&zs->zs_callers, NULL) != 0) {
/* Drop reference taken in dmu_zfetch_prepare(). */
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
return;
}
mutex_enter(&zf->zf_lock);
if (zs->zs_missed) {
pf_start = zs->zs_pf_start;
pf_end = zs->zs_pf_start = zs->zs_pf_end;
} else {
pf_start = pf_end = 0;
}
ipf_start = zs->zs_ipf_start;
ipf_end = zs->zs_ipf_start = zs->zs_ipf_end;
mutex_exit(&zf->zf_lock);
ASSERT3S(pf_start, <=, pf_end);
ASSERT3S(ipf_start, <=, ipf_end);
epbs = zf->zf_dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
ipf_start = P2ROUNDUP(ipf_start, 1 << epbs) >> epbs;
ipf_end = P2ROUNDUP(ipf_end, 1 << epbs) >> epbs;
ASSERT3S(ipf_start, <=, ipf_end);
issued = pf_end - pf_start + ipf_end - ipf_start;
if (issued > 1) {
/* More references on top of taken in dmu_zfetch_prepare(). */
zfs_refcount_add_few(&zs->zs_refs, issued - 1, NULL);
} else if (issued == 0) {
/* Some other thread has done our work, so drop the ref. */
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
return;
}
aggsum_add(&zfetch_sums.zfetchstat_io_active, issued);
if (!have_lock)
rw_enter(&zf->zf_dnode->dn_struct_rwlock, RW_READER);
issued = 0;
for (int64_t blk = pf_start; blk < pf_end; blk++) {
issued += dbuf_prefetch_impl(zf->zf_dnode, 0, blk,
ZIO_PRIORITY_ASYNC_READ, 0, dmu_zfetch_done, zs);
}
for (int64_t iblk = ipf_start; iblk < ipf_end; iblk++) {
issued += dbuf_prefetch_impl(zf->zf_dnode, 1, iblk,
ZIO_PRIORITY_ASYNC_READ, 0, dmu_zfetch_done, zs);
}
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
if (issued)
ZFETCHSTAT_ADD(zfetchstat_io_issued, issued);
}
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data,
boolean_t missed, boolean_t have_lock)
{
zstream_t *zs;
zs = dmu_zfetch_prepare(zf, blkid, nblks, fetch_data, have_lock);
if (zs)
dmu_zfetch_run(zs, missed, have_lock);
}
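/*
 * Usage sketch for the split form (dmu_buf_hold_array_by_dnode() in dmu.c
 * is the in-tree caller; the variable names here are illustrative):
 *
 *	zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
 *	    read, have_lock);
 *	... issue and wait for the demand reads ...
 *	if (zs != NULL)
 *		dmu_zfetch_run(zs, missed, have_lock);
 *
 * The window lets concurrent callers blocked on the same indirect block
 * coalesce: each prepare() counts a caller, and only the last run()
 * issues the prefetch (see zs_callers above).
 */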
ZFS_MODULE_PARAM(zfs_prefetch, zfs_prefetch_, disable, INT, ZMOD_RW,
"Disable all ZFS prefetching");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_streams, UINT, ZMOD_RW,
"Max number of streams per zfetch");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, min_sec_reap, UINT, ZMOD_RW,
"Min time before stream reclaim");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_sec_reap, UINT, ZMOD_RW,
"Max time before stream delete");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, min_distance, UINT, ZMOD_RW,
"Min bytes to prefetch per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_distance, UINT, ZMOD_RW,
"Max bytes to prefetch per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_idistance, UINT, ZMOD_RW,
"Max bytes to prefetch indirects for per stream");
-
-ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, array_rd_sz, U64, ZMOD_RW,
- "Number of bytes in a array_read");
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index d15268cd7bc7..7cf03264dce2 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -1,2704 +1,2705 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_allocate", KSTAT_DATA_UINT64 },
{ "dnode_reallocate", KSTAT_DATA_UINT64 },
{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
{ "dnode_move_special", KSTAT_DATA_UINT64 },
{ "dnode_move_handle", KSTAT_DATA_UINT64 },
{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
{ "dnode_move_active", KSTAT_DATA_UINT64 },
};
dnode_sums_t dnode_sums;
static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;
static dnode_phys_t dnode_phys_zero __maybe_unused;
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
#ifdef _KERNEL
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif /* _KERNEL */
static int
dbuf_compare(const void *x1, const void *x2)
{
const dmu_buf_impl_t *d1 = x1;
const dmu_buf_impl_t *d2 = x2;
int cmp = TREE_CMP(d1->db_level, d2->db_level);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
if (likely(cmp))
return (cmp);
if (d1->db_state == DB_SEARCH) {
ASSERT3S(d2->db_state, !=, DB_SEARCH);
return (-1);
} else if (d2->db_state == DB_SEARCH) {
ASSERT3S(d1->db_state, !=, DB_SEARCH);
return (1);
}
return (TREE_PCMP(d1, d2));
}
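/*
 * Example of the DB_SEARCH ordering above (a sketch): callers such as
 * dbuf_free_range() call avl_find() with a stack dmu_buf_impl_t whose
 * db_state is DB_SEARCH. With equal (db_level, db_blkid) the sentinel
 * sorts strictly before any real dbuf, so the lookup never matches a
 * live dbuf and avl_nearest(..., AVL_AFTER) yields the first real dbuf
 * at or beyond the search point.
 */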
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dnode_t *dn = arg;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
/*
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
memset(dn->dn_next_type, 0, sizeof (dn->dn_next_type));
memset(dn->dn_next_nblkptr, 0, sizeof (dn->dn_next_nblkptr));
memset(dn->dn_next_nlevels, 0, sizeof (dn->dn_next_nlevels));
memset(dn->dn_next_indblkshift, 0, sizeof (dn->dn_next_indblkshift));
memset(dn->dn_next_bonustype, 0, sizeof (dn->dn_next_bonustype));
memset(dn->dn_rm_spillblk, 0, sizeof (dn->dn_rm_spillblk));
memset(dn->dn_next_bonuslen, 0, sizeof (dn->dn_next_bonuslen));
memset(dn->dn_next_blksz, 0, sizeof (dn->dn_next_blksz));
memset(dn->dn_next_maxblkid, 0, sizeof (dn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
dn->dn_moved = 0;
return (0);
}
static void
dnode_dest(void *arg, void *unused)
{
(void) unused;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
cv_destroy(&dn->dn_nodnholds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
}
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
ASSERT3P(dn->dn_bonus, ==, NULL);
ASSERT(!dn->dn_have_spill);
ASSERT3P(dn->dn_zio, ==, NULL);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
avl_destroy(&dn->dn_dbufs);
}
static int
dnode_kstats_update(kstat_t *ksp, int rw)
{
dnode_stats_t *ds = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
ds->dnode_hold_dbuf_hold.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_dbuf_hold);
ds->dnode_hold_dbuf_read.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_dbuf_read);
ds->dnode_hold_alloc_hits.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_hits);
ds->dnode_hold_alloc_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_misses);
ds->dnode_hold_alloc_interior.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_interior);
ds->dnode_hold_alloc_lock_retry.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_lock_retry);
ds->dnode_hold_alloc_lock_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_lock_misses);
ds->dnode_hold_alloc_type_none.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_type_none);
ds->dnode_hold_free_hits.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_hits);
ds->dnode_hold_free_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_misses);
ds->dnode_hold_free_lock_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_lock_misses);
ds->dnode_hold_free_lock_retry.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_lock_retry);
ds->dnode_hold_free_refcount.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_refcount);
ds->dnode_hold_free_overflow.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_overflow);
ds->dnode_free_interior_lock_retry.value.ui64 =
wmsum_value(&dnode_sums.dnode_free_interior_lock_retry);
ds->dnode_allocate.value.ui64 =
wmsum_value(&dnode_sums.dnode_allocate);
ds->dnode_reallocate.value.ui64 =
wmsum_value(&dnode_sums.dnode_reallocate);
ds->dnode_buf_evict.value.ui64 =
wmsum_value(&dnode_sums.dnode_buf_evict);
ds->dnode_alloc_next_chunk.value.ui64 =
wmsum_value(&dnode_sums.dnode_alloc_next_chunk);
ds->dnode_alloc_race.value.ui64 =
wmsum_value(&dnode_sums.dnode_alloc_race);
ds->dnode_alloc_next_block.value.ui64 =
wmsum_value(&dnode_sums.dnode_alloc_next_block);
ds->dnode_move_invalid.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_invalid);
ds->dnode_move_recheck1.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_recheck1);
ds->dnode_move_recheck2.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_recheck2);
ds->dnode_move_special.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_special);
ds->dnode_move_handle.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_handle);
ds->dnode_move_rwlock.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_rwlock);
ds->dnode_move_active.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_active);
return (0);
}
void
dnode_init(void)
{
ASSERT(dnode_cache == NULL);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
kmem_cache_set_move(dnode_cache, dnode_move);
wmsum_init(&dnode_sums.dnode_hold_dbuf_hold, 0);
wmsum_init(&dnode_sums.dnode_hold_dbuf_read, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_hits, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_interior, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_lock_retry, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_lock_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_type_none, 0);
wmsum_init(&dnode_sums.dnode_hold_free_hits, 0);
wmsum_init(&dnode_sums.dnode_hold_free_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_free_lock_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_free_lock_retry, 0);
wmsum_init(&dnode_sums.dnode_hold_free_refcount, 0);
wmsum_init(&dnode_sums.dnode_hold_free_overflow, 0);
wmsum_init(&dnode_sums.dnode_free_interior_lock_retry, 0);
wmsum_init(&dnode_sums.dnode_allocate, 0);
wmsum_init(&dnode_sums.dnode_reallocate, 0);
wmsum_init(&dnode_sums.dnode_buf_evict, 0);
wmsum_init(&dnode_sums.dnode_alloc_next_chunk, 0);
wmsum_init(&dnode_sums.dnode_alloc_race, 0);
wmsum_init(&dnode_sums.dnode_alloc_next_block, 0);
wmsum_init(&dnode_sums.dnode_move_invalid, 0);
wmsum_init(&dnode_sums.dnode_move_recheck1, 0);
wmsum_init(&dnode_sums.dnode_move_recheck2, 0);
wmsum_init(&dnode_sums.dnode_move_special, 0);
wmsum_init(&dnode_sums.dnode_move_handle, 0);
wmsum_init(&dnode_sums.dnode_move_rwlock, 0);
wmsum_init(&dnode_sums.dnode_move_active, 0);
dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dnode_ksp != NULL) {
dnode_ksp->ks_data = &dnode_stats;
dnode_ksp->ks_update = dnode_kstats_update;
kstat_install(dnode_ksp);
}
}
void
dnode_fini(void)
{
if (dnode_ksp != NULL) {
kstat_delete(dnode_ksp);
dnode_ksp = NULL;
}
wmsum_fini(&dnode_sums.dnode_hold_dbuf_hold);
wmsum_fini(&dnode_sums.dnode_hold_dbuf_read);
wmsum_fini(&dnode_sums.dnode_hold_alloc_hits);
wmsum_fini(&dnode_sums.dnode_hold_alloc_misses);
wmsum_fini(&dnode_sums.dnode_hold_alloc_interior);
wmsum_fini(&dnode_sums.dnode_hold_alloc_lock_retry);
wmsum_fini(&dnode_sums.dnode_hold_alloc_lock_misses);
wmsum_fini(&dnode_sums.dnode_hold_alloc_type_none);
wmsum_fini(&dnode_sums.dnode_hold_free_hits);
wmsum_fini(&dnode_sums.dnode_hold_free_misses);
wmsum_fini(&dnode_sums.dnode_hold_free_lock_misses);
wmsum_fini(&dnode_sums.dnode_hold_free_lock_retry);
wmsum_fini(&dnode_sums.dnode_hold_free_refcount);
wmsum_fini(&dnode_sums.dnode_hold_free_overflow);
wmsum_fini(&dnode_sums.dnode_free_interior_lock_retry);
wmsum_fini(&dnode_sums.dnode_allocate);
wmsum_fini(&dnode_sums.dnode_reallocate);
wmsum_fini(&dnode_sums.dnode_buf_evict);
wmsum_fini(&dnode_sums.dnode_alloc_next_chunk);
wmsum_fini(&dnode_sums.dnode_alloc_race);
wmsum_fini(&dnode_sums.dnode_alloc_next_block);
wmsum_fini(&dnode_sums.dnode_move_invalid);
wmsum_fini(&dnode_sums.dnode_move_recheck1);
wmsum_fini(&dnode_sums.dnode_move_recheck2);
wmsum_fini(&dnode_sums.dnode_move_special);
wmsum_fini(&dnode_sums.dnode_move_handle);
wmsum_fini(&dnode_sums.dnode_move_rwlock);
wmsum_fini(&dnode_sums.dnode_move_active);
kmem_cache_destroy(dnode_cache);
dnode_cache = NULL;
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
int drop_struct_lock = FALSE;
ASSERT(dn->dn_phys);
ASSERT(dn->dn_objset);
ASSERT(dn->dn_handle->dnh_dnode == dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
return;
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
}
ASSERT3U(dn->dn_nlevels, <=, 30);
ASSERT(DMU_OT_IS_VALID(dn->dn_type));
ASSERT3U(dn->dn_nblkptr, >=, 1);
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
ASSERT3U(dn->dn_datablksz, ==,
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
dn->dn_bonuslen, <=, max_bonuslen);
for (i = 0; i < TXG_SIZE; i++) {
ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
}
}
if (dn->dn_phys->dn_type != DMU_OT_NONE)
ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
if (dn->dn_dbuf != NULL) {
ASSERT3P(dn->dn_phys, ==,
(dnode_phys_t *)dn->dn_dbuf->db.db_data +
(dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
uint64_t *buf64 = (void*)&dnp->dn_blkptr;
int i;
if (dnp->dn_type == DMU_OT_NONE) {
memset(dnp, 0, sizeof (dnode_phys_t));
return;
}
dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
dnp->dn_used = BSWAP_64(dnp->dn_used);
/*
* dn_nblkptr is only one byte, so it's OK to read it in either
 * byte order. We can't read dn_bonuslen.
*/
ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
buf64[i] = BSWAP_64(buf64[i]);
/*
* OK to check dn_bonuslen for zero, because it won't matter if
* we have the wrong byte order. This is necessary because the
 * meta dnode (the dnode of dnodes) is smaller than a regular dnode.
*/
if (dnp->dn_bonuslen != 0) {
dmu_object_byteswap_t byteswap;
ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
dmu_ot_byteswap[byteswap].ob_func(DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
/* Swap SPILL block if we have one */
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
}
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
dnode_byteswap(dnp);
i += DNODE_MIN_SIZE;
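		/*
		 * dn_extra_slots is a single byte, so it is valid in native
		 * byte order after the swap; use it to skip the interior
		 * slots of a large dnode.
		 */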
if (dnp->dn_type != DMU_OT_NONE)
i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
}
}
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t));
if (newsize < dn->dn_bonuslen) {
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
memset(data_end, 0, diff);
}
dn->dn_bonuslen = newsize;
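	/*
	 * dn_next_bonuslen uses 0 to mean "unchanged", so record a new
	 * length of zero with the DN_ZERO_BONUSLEN sentinel instead.
	 */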
if (newsize == 0)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
else
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_have_spill = B_FALSE;
}
static void
dnode_setdblksz(dnode_t *dn, int size)
{
ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
dn->dn_datablksz = size;
dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
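	/*
	 * For a power-of-2 size, highbit64(size - 1) yields log2(size);
	 * legacy non-power-of-2 block sizes are recorded with a shift of 0.
	 */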
dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn;
dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
dn->dn_moved = 0;
/*
* Defer setting dn_objset until the dnode is ready to be a candidate
* for the dnode_move() callback.
*/
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_handle = dnh;
dn->dn_phys = dnp;
if (dnp->dn_datablkszsec) {
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
} else {
dn->dn_datablksz = 0;
dn->dn_datablkszsec = 0;
dn->dn_datablkshift = 0;
}
dn->dn_indblkshift = dnp->dn_indblkshift;
dn->dn_nlevels = dnp->dn_nlevels;
dn->dn_type = dnp->dn_type;
dn->dn_nblkptr = dnp->dn_nblkptr;
dn->dn_checksum = dnp->dn_checksum;
dn->dn_compress = dnp->dn_compress;
dn->dn_bonustype = dnp->dn_bonustype;
dn->dn_bonuslen = dnp->dn_bonuslen;
dn->dn_num_slots = dnp->dn_extra_slots + 1;
dn->dn_maxblkid = dnp->dn_maxblkid;
dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
dn->dn_id_flags = 0;
dmu_zfetch_init(&dn->dn_zfetch, dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
mutex_enter(&os->os_lock);
/*
* Exclude special dnodes from os_dnodes so an empty os_dnodes
* signifies that the special dnodes have no references from
* their children (the entries in os_dnodes). This allows
* dnode_destroy() to easily determine if the last child has
* been removed and then complete eviction of the objset.
*/
if (!DMU_OBJECT_IS_SPECIAL(object))
list_insert_head(&os->os_dnodes, dn);
membar_producer();
/*
* Everything else must be valid before assigning dn_objset
* makes the dnode eligible for dnode_move().
*/
dn->dn_objset = os;
dnh->dnh_dnode = dn;
mutex_exit(&os->os_lock);
arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
return (dn);
}
/*
* Caller must be holding the dnode handle, which is released upon return.
*/
static void
dnode_destroy(dnode_t *dn)
{
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
list_remove(&os->os_dnodes, dn);
complete_os_eviction =
list_is_empty(&os->os_dnodes) &&
list_link_active(&os->os_evicting_node);
}
mutex_exit(&os->os_lock);
/* the dnode can no longer move, so we can release the handle */
if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
zrl_remove(&dn->dn_handle->dnh_zrlock);
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
}
dn->dn_zio = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
if (complete_os_eviction)
dmu_objset_evict_done(os);
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int i;
ASSERT3U(dn_slots, >, 0);
ASSERT3U(dn_slots << DNODE_SHIFT, <=,
spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (blocksize == 0)
blocksize = 1 << zfs_default_bs;
else
blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
if (ibs == 0)
ibs = zfs_default_ibs;
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
dn->dn_objset, (u_longlong_t)dn->dn_object,
(u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(memcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(dn->dn_maxblkid);
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
}
dn->dn_type = ot;
dnode_setdblksz(dn, blocksize);
dn->dn_indblkshift = ibs;
dn->dn_nlevels = 1;
dn->dn_num_slots = dn_slots;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
dn->dn_nblkptr = 1;
else {
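		/*
		 * Block pointers and the bonus buffer share the tail of the
		 * dnode, so use as many block pointers as still fit after
		 * the requested bonus length.
		 */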
dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
}
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_dirty_txg = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
dnode_setdirty(dn, tx);
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx)
{
int nblkptr;
ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
ASSERT0(blocksize % SPA_MINBLOCKSIZE);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
ASSERT(tx->tx_txg != 0);
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
dnode_free_interior_slots(dn);
DNODE_STAT_BUMP(dnode_reallocate);
/* clean up any unreferenced dbufs */
dnode_evict_dbufs(dn);
dn->dn_id_flags = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
/* change blocksize */
ASSERT0(dn->dn_maxblkid);
ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
dnode_block_freed(dn, 0));
dnode_setdblksz(dn, blocksize);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
nblkptr = 1;
else
nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
if (dn->dn_bonustype != bonustype)
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
if (dn->dn_nblkptr != nblkptr)
dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
dbuf_rm_spill(dn, tx);
dnode_rm_spill(dn, tx);
}
rw_exit(&dn->dn_struct_rwlock);
/* change type */
dn->dn_type = ot;
/* change bonus size and type */
mutex_enter(&dn->dn_mtx);
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_num_slots = dn_slots;
dn->dn_nblkptr = nblkptr;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
/* fix up the bonus db_size */
if (dn->dn_bonus) {
dn->dn_bonus->db.db_size =
DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
}
dn->dn_allocated_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
}
#ifdef _KERNEL
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
/* Copy fields. */
ndn->dn_objset = odn->dn_objset;
ndn->dn_object = odn->dn_object;
ndn->dn_dbuf = odn->dn_dbuf;
ndn->dn_handle = odn->dn_handle;
ndn->dn_phys = odn->dn_phys;
ndn->dn_type = odn->dn_type;
ndn->dn_bonuslen = odn->dn_bonuslen;
ndn->dn_bonustype = odn->dn_bonustype;
ndn->dn_nblkptr = odn->dn_nblkptr;
ndn->dn_checksum = odn->dn_checksum;
ndn->dn_compress = odn->dn_compress;
ndn->dn_nlevels = odn->dn_nlevels;
ndn->dn_indblkshift = odn->dn_indblkshift;
ndn->dn_datablkshift = odn->dn_datablkshift;
ndn->dn_datablkszsec = odn->dn_datablkszsec;
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
memcpy(ndn->dn_next_type, odn->dn_next_type,
sizeof (odn->dn_next_type));
memcpy(ndn->dn_next_nblkptr, odn->dn_next_nblkptr,
sizeof (odn->dn_next_nblkptr));
memcpy(ndn->dn_next_nlevels, odn->dn_next_nlevels,
sizeof (odn->dn_next_nlevels));
memcpy(ndn->dn_next_indblkshift, odn->dn_next_indblkshift,
sizeof (odn->dn_next_indblkshift));
memcpy(ndn->dn_next_bonustype, odn->dn_next_bonustype,
sizeof (odn->dn_next_bonustype));
memcpy(ndn->dn_rm_spillblk, odn->dn_rm_spillblk,
sizeof (odn->dn_rm_spillblk));
memcpy(ndn->dn_next_bonuslen, odn->dn_next_bonuslen,
sizeof (odn->dn_next_bonuslen));
memcpy(ndn->dn_next_blksz, odn->dn_next_blksz,
sizeof (odn->dn_next_blksz));
memcpy(ndn->dn_next_maxblkid, odn->dn_next_maxblkid,
sizeof (odn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
memcpy(ndn->dn_free_ranges, odn->dn_free_ranges,
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
ndn->dn_oldused = odn->dn_oldused;
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
dmu_zfetch_init(&ndn->dn_zfetch, ndn);
/*
* Update back pointers. Updating the handle fixes the back pointer of
* every descendant dbuf as well as the bonus dbuf.
*/
ASSERT(ndn->dn_handle->dnh_dnode == odn);
ndn->dn_handle->dnh_dnode = ndn;
/*
* Invalidate the original dnode by clearing all of its back pointers.
*/
odn->dn_dbuf = NULL;
odn->dn_handle = NULL;
avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
odn->dn_bonus = NULL;
dmu_zfetch_fini(&odn->dn_zfetch);
/*
* Set the low bit of the objset pointer to ensure that dnode_move()
* recognizes the dnode as invalid in any subsequent callback.
*/
POINTER_INVALIDATE(&odn->dn_objset);
/*
* Satisfy the destructor.
*/
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
odn->dn_free_ranges[i] = NULL;
odn->dn_next_nlevels[i] = 0;
odn->dn_next_indblkshift[i] = 0;
odn->dn_next_bonustype[i] = 0;
odn->dn_rm_spillblk[i] = 0;
odn->dn_next_bonuslen[i] = 0;
odn->dn_next_blksz[i] = 0;
}
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
odn->dn_dirty_txg = 0;
odn->dn_dirtyctx = 0;
odn->dn_dirtyctx_firstset = NULL;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
/*
* Mark the dnode.
*/
ndn->dn_moved = 1;
odn->dn_moved = (uint8_t)-1;
}
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
dnode_t *odn = buf, *ndn = newbuf;
objset_t *os;
int64_t refcount;
uint32_t dbufs;
/*
* The dnode is on the objset's list of known dnodes if the objset
* pointer is valid. We set the low bit of the objset pointer when
* freeing the dnode to invalidate it, and the memory patterns written
* by kmem (baddcafe and deadbeef) set at least one of the two low bits.
* A newly created dnode sets the objset pointer last of all to indicate
* that the dnode is known and in a valid state to be moved by this
* function.
*/
os = odn->dn_objset;
if (!POINTER_IS_VALID(os)) {
DNODE_STAT_BUMP(dnode_move_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* Ensure that the objset does not go away during the move.
*/
rw_enter(&os_lock, RW_WRITER);
if (os != odn->dn_objset) {
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* If the dnode is still valid, then so is the objset. We know that no
* valid objset can be freed while we hold os_lock, so we can safely
* ensure that the objset remains in use.
*/
mutex_enter(&os->os_lock);
/*
* Recheck the objset pointer in case the dnode was removed just before
* acquiring the lock.
*/
if (os != odn->dn_objset) {
mutex_exit(&os->os_lock);
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* At this point we know that as long as we hold os->os_lock, the dnode
* cannot be freed and fields within the dnode can be safely accessed.
* The objset listing this dnode cannot go away as long as this dnode is
* on its list.
*/
rw_exit(&os_lock);
if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_special);
return (KMEM_CBRC_NO);
}
ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
/*
* Lock the dnode handle to prevent the dnode from obtaining any new
* holds. This also prevents the descendant dbufs and the bonus dbuf
* from accessing the dnode, so that we can discount their holds. The
* handle is safe to access because we know that while the dnode cannot
* go away, neither can its handle. Once we hold dnh_zrlock, we can
* safely move any dnode referenced only by dbufs.
*/
if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_handle);
return (KMEM_CBRC_LATER);
}
/*
* Ensure a consistent view of the dnode's holds and the dnode's dbufs.
* We need to guarantee that there is a hold for every dbuf in order to
* determine whether the dnode is actively referenced. Falsely matching
* a dbuf to an active hold would lead to an unsafe move. It's possible
* that a thread already having an active dnode hold is about to add a
* dbuf, and we can't compare hold and dbuf counts while the add is in
* progress.
*/
if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_rwlock);
return (KMEM_CBRC_LATER);
}
/*
* A dbuf may be removed (evicted) without an active dnode hold. In that
* case, the dbuf count is decremented under the handle lock before the
* dbuf's hold is released. This order ensures that if we count the hold
* after the dbuf is removed but before its hold is released, we will
* treat the unmatched hold as active and exit safely. If we count the
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = DN_DBUFS_COUNT(odn);
/* We can't have more dbufs than dnode holds. */
ASSERT3U(dbufs, <=, refcount);
DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
uint32_t, dbufs);
if (refcount > dbufs) {
rw_exit(&odn->dn_struct_rwlock);
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_active);
return (KMEM_CBRC_LATER);
}
rw_exit(&odn->dn_struct_rwlock);
/*
* At this point we know that anyone with a hold on the dnode is not
* actively referencing it. The dnode is known and in a valid state to
* move. We're holding the locks needed to execute the critical section.
*/
dnode_move_impl(odn, ndn);
list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
return (KMEM_CBRC_YES);
}
#endif /* _KERNEL */
static void
dnode_slots_hold(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
zrl_add(&dnh->dnh_zrlock);
}
}
static void
dnode_slots_rele(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (zrl_is_locked(&dnh->dnh_zrlock))
zrl_exit(&dnh->dnh_zrlock);
else
zrl_remove(&dnh->dnh_zrlock);
}
}
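/*
 * Try to acquire the zrlock on each of the requested slots without
 * blocking. On failure, drop any locks already taken and return 0 so
 * the caller can back off and retry.
 */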
static int
dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (!zrl_tryenter(&dnh->dnh_zrlock)) {
for (int j = idx; j < i; j++) {
dnh = &children->dnc_children[j];
zrl_exit(&dnh->dnh_zrlock);
}
return (0);
}
}
return (1);
}
static void
dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnh->dnh_dnode = ptr;
}
}
static boolean_t
dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
/*
* If all dnode slots are either already free or
 * evictable, return B_TRUE.
*/
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnode_t *dn = dnh->dnh_dnode;
if (dn == DN_SLOT_FREE) {
continue;
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
zfs_refcount_is_zero(&dn->dn_holds) &&
!DNODE_IS_DIRTY(dn));
mutex_exit(&dn->dn_mtx);
if (!can_free)
return (B_FALSE);
else
continue;
} else {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void
dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
dnode_destroy(dnh->dnh_dnode);
dnh->dnh_dnode = DN_SLOT_FREE;
}
}
}
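/*
 * Mark the interior (non-first) slots of a multi-slot dnode free so they
 * can be reused; the first slot still holds the dnode itself, hence the
 * "+ 1" when computing idx below.
 */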
void
dnode_free_interior_slots(dnode_t *dn)
{
dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
int idx = (dn->dn_object & (epb - 1)) + 1;
int slots = dn->dn_num_slots - 1;
if (slots == 0)
return;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
while (!dnode_slots_tryenter(children, idx, slots)) {
DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
kpreempt(KPREEMPT_SYNC);
}
dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
dnode_slots_rele(children, idx, slots);
}
void
dnode_special_close(dnode_handle_t *dnh)
{
dnode_t *dn = dnh->dnh_dnode;
/*
 * Ensure dnode_rele_and_unlock() has released dn_mtx after the final
 * zfs_refcount_remove().
*/
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_count(&dn->dn_holds) > 0)
cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
zrl_add(&dnh->dnh_zrlock);
dnode_destroy(dn); /* implicit zrl_remove() */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = NULL;
}
void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
dnode_handle_t *dnh)
{
dnode_t *dn;
zrl_init(&dnh->dnh_zrlock);
VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
dn = dnode_create(os, dnp, NULL, object, dnh);
DNODE_VERIFY(dn);
zrl_exit(&dnh->dnh_zrlock);
}
static void
dnode_buf_evict_async(void *dbu)
{
dnode_children_t *dnc = dbu;
DNODE_STAT_BUMP(dnode_buf_evict);
for (int i = 0; i < dnc->dnc_count; i++) {
dnode_handle_t *dnh = &dnc->dnc_children[i];
dnode_t *dn;
/*
* The dnode handle lock guards against the dnode moving to
* another valid address, so there is no need here to guard
* against changes to or from NULL.
*/
if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
continue;
}
zrl_add(&dnh->dnh_zrlock);
dn = dnh->dnh_dnode;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
}
kmem_free(dnc, sizeof (dnode_children_t) +
dnc->dnc_count * sizeof (dnode_handle_t));
}
/*
* When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
* to ensure the hole at the specified object offset is large enough to
* hold the dnode being created. The slots parameter is also used to ensure
* a dnode does not span multiple dnode blocks. In both of these cases, if
* a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
* are only possible when using DNODE_MUST_BE_FREE.
*
* If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
* dnode_hold_impl() will check if the requested dnode is already consumed
 * as an extra dnode slot by a large dnode, in which case it returns
* ENOENT.
*
* If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
 * return whether the hold would succeed or not. tag and dnp should be
 * set to NULL in this case.
*
* errors:
* EINVAL - Invalid object number or flags.
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
* EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
* - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
* - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
* ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
* - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
* EIO - I/O error when reading the meta dnode dbuf.
*
 * With DNODE_MUST_BE_FREE, the hold can succeed even for free dnodes.
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
const void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
int type;
uint64_t blk;
dnode_t *mdn, *dn;
dmu_buf_impl_t *db;
dnode_children_t *dnc;
dnode_phys_t *dn_block;
dnode_handle_t *dnh;
ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
/*
* If you are holding the spa config lock as writer, you shouldn't
* be asking the DMU to do *anything* unless it's the root pool
* which may require us to read from the root filesystem while
* holding some (not all) of the locks as writer.
*/
ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
object == DMU_PROJECTUSED_OBJECT) {
if (object == DMU_USERUSED_OBJECT)
dn = DMU_USERUSED_DNODE(os);
else if (object == DMU_GROUPUSED_OBJECT)
dn = DMU_GROUPUSED_DNODE(os);
else
dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
/* Don't actually hold if dry run, just return 0 */
if (!(flag & DNODE_DRY_RUN)) {
(void) zfs_refcount_add(&dn->dn_holds, tag);
*dnp = dn;
}
return (0);
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
DNODE_VERIFY(mdn);
if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
rw_enter(&mdn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
db = dbuf_hold(mdn, blk, FTAG);
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL) {
DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
return (SET_ERROR(EIO));
}
/*
 * We do not need to decrypt to read the dnode, so it doesn't matter
* if we get the encrypted or decrypted version.
*/
err = dbuf_read(db, NULL, DB_RF_CANFAIL |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (err) {
DNODE_STAT_BUMP(dnode_hold_dbuf_read);
dbuf_rele(db, FTAG);
return (err);
}
ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
epb = db->db.db_size >> DNODE_SHIFT;
idx = object & (epb - 1);
dn_block = (dnode_phys_t *)db->db.db_data;
ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
dnc = dmu_buf_get_user(&db->db);
dnh = NULL;
if (dnc == NULL) {
dnode_children_t *winner;
int skip = 0;
dnc = kmem_zalloc(sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t), KM_SLEEP);
dnc->dnc_count = epb;
dnh = &dnc->dnc_children[0];
/* Initialize dnode slot status from dnode_phys_t */
for (int i = 0; i < epb; i++) {
zrl_init(&dnh[i].dnh_zrlock);
if (skip) {
skip--;
continue;
}
if (dn_block[i].dn_type != DMU_OT_NONE) {
int interior = dn_block[i].dn_extra_slots;
dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
dnode_set_slots(dnc, i + 1, interior,
DN_SLOT_INTERIOR);
skip = interior;
} else {
dnh[i].dnh_dnode = DN_SLOT_FREE;
skip = 0;
}
}
dmu_buf_init_user(&dnc->dnc_dbu, NULL,
dnode_buf_evict_async, NULL);
winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
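		/*
		 * dmu_buf_set_user() returns the previously attached user,
		 * if any; if another thread won the race to attach its
		 * dnode_children_t, discard ours and use the winner's.
		 */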
if (winner != NULL) {
for (int i = 0; i < epb; i++)
zrl_destroy(&dnh[i].dnh_zrlock);
kmem_free(dnc, sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t));
dnc = winner;
}
}
ASSERT(dnc->dnc_count == epb);
if (flag & DNODE_MUST_BE_ALLOCATED) {
slots = 1;
dnode_slots_hold(dnc, idx, slots);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
DNODE_STAT_BUMP(dnode_hold_alloc_interior);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
DNODE_STAT_BUMP(dnode_hold_alloc_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
} else {
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
kpreempt(KPREEMPT_SYNC);
}
/*
* Someone else won the race and called dnode_create()
* after we checked DN_SLOT_IS_PTR() above but before
* we acquired the lock.
*/
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
DNODE_STAT_BUMP(dnode_hold_alloc_hits);
} else if (flag & DNODE_MUST_BE_FREE) {
if (idx + slots - 1 >= DNODES_PER_BLOCK) {
DNODE_STAT_BUMP(dnode_hold_free_overflow);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_hold(dnc, idx, slots);
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
kpreempt(KPREEMPT_SYNC);
}
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
/*
* Allocated but otherwise free dnodes which would
 * be in the interior of a multi-slot dnode need
* to be freed. Single slot dnodes can be safely
* re-purposed as a performance optimization.
*/
if (slots > 1)
dnode_reclaim_slots(dnc, idx + 1, slots - 1);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
mutex_enter(&dn->dn_mtx);
if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
DNODE_STAT_BUMP(dnode_hold_free_hits);
} else {
dbuf_rele(db, FTAG);
return (SET_ERROR(EINVAL));
}
ASSERT0(dn->dn_free_txg);
if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);
mutex_exit(&dn->dn_mtx);
/* Now we can rely on the hold to prevent the dnode from moving. */
dnode_slots_rele(dnc, idx, slots);
DNODE_VERIFY(dn);
ASSERT3P(dnp, !=, NULL);
ASSERT3P(dn->dn_dbuf, ==, db);
ASSERT3U(dn->dn_object, ==, object);
dbuf_rele(db, FTAG);
*dnp = dn;
return (0);
}
/*
 * Return a held dnode in *dnp if the object is allocated, or an error
 * (e.g. ENOENT) if not.
*/
int
dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
}
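/*
 * Typical usage (sketch):
 *
 *	dnode_t *dn;
 *	int err = dnode_hold(os, object, FTAG, &dn);
 *	if (err == 0) {
 *		... use dn ...
 *		dnode_rele(dn, FTAG);
 *	}
 */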
/*
* Can only add a reference if there is already at least one
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
boolean_t
dnode_add_ref(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
void
dnode_rele(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
}
void
dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;
refs = zfs_refcount_remove(&dn->dn_holds, tag);
if (refs == 0)
cv_broadcast(&dn->dn_nodnholds);
mutex_exit(&dn->dn_mtx);
/* dnode could get destroyed at this point, so don't use it anymore */
/*
* It's unsafe to release the last hold on a dnode by dnode_rele() or
* indirectly by dbuf_rele() while relying on the dnode handle to
* prevent the dnode from moving, since releasing the last hold could
* result in the dnode's parent dbuf evicting its dnode handles. For
* that reason anyone calling dnode_rele() or dbuf_rele() without some
* other direct or indirect hold on the dnode must first drop the dnode
* handle.
*/
#ifdef ZFS_DEBUG
ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
#endif
	/* NOTE: special dnodes such as the meta dnode have no dn_dbuf */
if (refs == 0 && db != NULL) {
/*
* Another thread could add a hold to the dnode handle in
* dnode_hold_impl() while holding the parent dbuf. Since the
* hold on the parent dbuf prevents the handle from being
* destroyed, the hold on the handle is OK. We can't yet assert
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, dnh, evicting);
}
}
/*
* Test whether we can create a dnode at the specified location.
*/
int
dnode_try_claim(objset_t *os, uint64_t object, int slots)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
slots, NULL, NULL));
}
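/*
 * For example (sketch), dnode_try_claim(os, object, 2) reports whether a
 * two-slot (1KB) dnode could be created at 'object' without taking a
 * hold on it.
 */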
/*
* Checks if the dnode contains any uncommitted dirty records.
*/
boolean_t
dnode_is_dirty(dnode_t *dn)
{
mutex_enter(&dn->dn_mtx);
for (int i = 0; i < TXG_SIZE; i++) {
if (multilist_link_active(&dn->dn_dirty_link[i])) {
mutex_exit(&dn->dn_mtx);
return (B_TRUE);
}
}
mutex_exit(&dn->dn_mtx);
return (B_FALSE);
}
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
uint64_t txg = tx->tx_txg;
if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
dsl_dataset_dirty(os->os_dsl_dataset, tx);
return;
}
DNODE_VERIFY(dn);
#ifdef ZFS_DEBUG
mutex_enter(&dn->dn_mtx);
ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
mutex_exit(&dn->dn_mtx);
#endif
/*
* Determine old uid/gid when necessary
*/
dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
multilist_t *dirtylist = &os->os_dirty_dnodes[txg & TXG_MASK];
multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
/*
* If we are already marked dirty, we're done.
*/
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
multilist_sublist_unlock(mls);
return;
}
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
(u_longlong_t)dn->dn_object, (u_longlong_t)txg);
multilist_sublist_insert_head(mls, dn);
multilist_sublist_unlock(mls);
/*
* The dnode maintains a hold on its containing dbuf as
* long as there are holds on it. Each instantiated child
* dbuf maintains a hold on the dnode. When the last child
* drops its hold, the dnode will drop its hold on the
* containing dbuf. We add a "dirty hold" here so that the
* dnode will hang around after we finish processing its
* children.
*/
VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
(void) dbuf_dirty(dn->dn_dbuf, tx);
dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
mutex_exit(&dn->dn_mtx);
return;
}
dn->dn_free_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
}
/*
* Try to change the block size for the indicated dnode. This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
*/
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int err;
ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (size == 0)
size = SPA_MINBLOCKSIZE;
else
size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
if (ibs == dn->dn_indblkshift)
ibs = 0;
- if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
+ if (size == dn->dn_datablksz && ibs == 0)
return (0);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* Check for any allocated blocks beyond the first */
if (dn->dn_maxblkid != 0)
goto fail;
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL;
db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_exit(&dn->dn_dbufs_mtx);
goto fail;
}
}
mutex_exit(&dn->dn_dbufs_mtx);
if (ibs && dn->dn_nlevels != 1)
goto fail;
- /* resize the old block */
- err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
- if (err == 0) {
- dbuf_new_size(db, size, tx);
- } else if (err != ENOENT) {
- goto fail;
- }
-
- dnode_setdblksz(dn, size);
dnode_setdirty(dn, tx);
- dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
+ if (size != dn->dn_datablksz) {
+ /* resize the old block */
+ err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
+ if (err == 0) {
+ dbuf_new_size(db, size, tx);
+ } else if (err != ENOENT) {
+ goto fail;
+ }
+
+ dnode_setdblksz(dn, size);
+ dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
+ if (db)
+ dbuf_rele(db, FTAG);
+ }
if (ibs) {
dn->dn_indblkshift = ibs;
- dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
+ dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
}
- /* release after we have fixed the blocksize in the dnode */
- if (db)
- dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
return (0);
fail:
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(ENOTSUP));
}
static void
dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
{
uint64_t txgoff = tx->tx_txg & TXG_MASK;
int old_nlevels = dn->dn_nlevels;
dmu_buf_impl_t *db;
list_t *list;
dbuf_dirty_record_t *new, *dr, *dr_next;
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT3U(new_nlevels, >, dn->dn_nlevels);
dn->dn_nlevels = new_nlevels;
ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
dn->dn_next_nlevels[txgoff] = new_nlevels;
	/* dirty the leftmost indirect block above the old top level */
db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
ASSERT(db != NULL);
new = dbuf_dirty(db, tx);
dbuf_rele(db, FTAG);
/* transfer the dirty records to the new indirect */
mutex_enter(&dn->dn_mtx);
mutex_enter(&new->dt.di.dr_mtx);
list = &dn->dn_dirty_records[txgoff];
for (dr = list_head(list); dr; dr = dr_next) {
dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
IMPLY(dr->dr_dbuf == NULL, old_nlevels == 1);
if (dr->dr_dbuf == NULL ||
(dr->dr_dbuf->db_level == old_nlevels - 1 &&
dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID)) {
list_remove(&dn->dn_dirty_records[txgoff], dr);
list_insert_tail(&new->dt.di.dr_children, dr);
dr->dr_parent = new;
}
}
mutex_exit(&new->dt.di.dr_mtx);
mutex_exit(&dn->dn_mtx);
}
int
dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
{
int ret = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_nlevels == nlevels) {
ret = 0;
goto out;
} else if (nlevels < dn->dn_nlevels) {
ret = SET_ERROR(EINVAL);
goto out;
}
dnode_set_nlevels_impl(dn, nlevels, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
return (ret);
}
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
boolean_t force)
{
int epbs, new_nlevels;
uint64_t sz;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(have_read ?
RW_READ_HELD(&dn->dn_struct_rwlock) :
RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* if we have a read-lock, check to see if we need to do any work
* before upgrading to a write-lock.
*/
if (have_read) {
if (blkid <= dn->dn_maxblkid)
return;
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
}
/*
* Raw sends (indicated by the force flag) require that we take the
* given blkid even if the value is lower than the current value.
*/
if (!force && blkid <= dn->dn_maxblkid)
goto out;
/*
* We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
* to indicate that this field is set. This allows us to set the
* maxblkid to 0 on an existing object in dnode_sync().
*/
dn->dn_maxblkid = blkid;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
blkid | DMU_NEXT_MAXBLKID_SET;
/*
* Compute the number of levels necessary to support the new maxblkid.
* Raw sends will ensure nlevels is set correctly for us.
*/
new_nlevels = 1;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
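	/*
	 * Each additional level multiplies the addressable block count by
	 * 2^epbs; the "sz >= dn->dn_nblkptr" clause stops the loop if sz
	 * overflows and wraps around.
	 */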
for (sz = dn->dn_nblkptr;
sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
new_nlevels++;
ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
if (!force) {
if (new_nlevels > dn->dn_nlevels)
dnode_set_nlevels_impl(dn, new_nlevels, tx);
} else {
ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
}
out:
if (have_read)
rw_downgrade(&dn->dn_struct_rwlock);
}
static void
dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
if (db != NULL) {
dmu_buf_will_dirty(&db->db, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Dirty all the in-core level-1 dbufs in the range specified by start_blkid
* and end_blkid.
*/
static void
dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db;
avl_index_t where;
db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
for (;;) {
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
if (db == NULL || db->db_level != 1 ||
db->db_blkid >= end_blkid) {
break;
}
/*
 * Set up the next blkid we want to search for.
*/
db_search->db_blkid = db->db_blkid + 1;
ASSERT3U(db->db_blkid, >=, start_blkid);
/*
* If the dbuf transitions to DB_EVICTING while we're trying
* to dirty it, then we will be unable to discover it in
* the dbuf hash table. This will result in a call to
* dbuf_create() which needs to acquire the dn_dbufs_mtx
* lock. To avoid a deadlock, we drop the lock before
* dirtying the level-1 dbuf.
*/
mutex_exit(&dn->dn_dbufs_mtx);
dnode_dirty_l1(dn, db->db_blkid, tx);
mutex_enter(&dn->dn_dbufs_mtx);
}
#ifdef ZFS_DEBUG
/*
* Walk all the in-core level-1 dbufs and verify they have been dirtied.
*/
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_level != 1 || db->db_blkid >= end_blkid)
break;
if (db->db_state != DB_EVICTING)
ASSERT(db->db_dirtycnt > 0);
}
#endif
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
void
dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
{
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
* initialize the objset.
*/
if (dn->dn_dirtyctx == DN_UNDIRTIED) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
if (ds != NULL) {
rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
}
if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
if (dmu_tx_is_syncing(tx))
dn->dn_dirtyctx = DN_DIRTY_SYNC;
else
dn->dn_dirtyctx = DN_DIRTY_OPEN;
dn->dn_dirtyctx_firstset = tag;
}
if (ds != NULL) {
rrw_exit(&ds->ds_bp_rwlock, tag);
}
}
}
static void
dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int res;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off), TRUE, FALSE,
FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
db_lock_type_t dblt;
boolean_t dirty;
dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
		/* don't dirty the block if it's neither on disk nor already dirty */
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, dblt, FTAG);
if (dirty) {
caddr_t data;
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
memset(data + blkoff, 0, len);
}
dbuf_rele(db, FTAG);
}
}
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
uint64_t blkoff, blkid, nblks;
int blksz, blkshift, head, tail;
int trunc = FALSE;
int epbs;
blksz = dn->dn_datablksz;
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
/*
* First, block align the region to free:
*/
if (ISP2(blksz)) {
head = P2NPHASE(off, blksz);
blkoff = P2PHASE(off, blksz);
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
ASSERT(dn->dn_maxblkid == 0);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
*/
blkid = 0;
nblks = 1;
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_dirty_l1(dn, 0, tx);
rw_exit(&dn->dn_struct_rwlock);
}
goto done;
} else if (off >= blksz) {
/* Freeing past end-of-data */
return;
} else {
/* Freeing part of the block. */
head = blksz - off;
ASSERT3U(head, >, 0);
}
blkoff = off;
}
/* zero out any partial block data at the start of the range */
if (head) {
ASSERT3U(blkoff + head, ==, blksz);
if (len < head)
head = len;
dnode_partial_zero(dn, off, blkoff, head, tx);
off += head;
len -= head;
}
/* If the range was less than one block, we're done */
if (len == 0)
return;
/* If the remaining range is past end of file, we're done */
if ((off >> blkshift) > dn->dn_maxblkid)
return;
ASSERT(ISP2(blksz));
if (trunc)
tail = 0;
else
tail = P2PHASE(len, blksz);
ASSERT0(P2PHASE(off, blksz));
/* zero out any partial block data at the end of the range */
if (tail) {
if (len < tail)
tail = len;
dnode_partial_zero(dn, off + len, 0, tail, tx);
len -= tail;
}
/* If the range did not include a full block, we are done */
if (len == 0)
return;
ASSERT(IS_P2ALIGNED(off, blksz));
ASSERT(trunc || IS_P2ALIGNED(len, blksz));
blkid = off >> blkshift;
nblks = len >> blkshift;
if (trunc)
nblks += 1;
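	/*
	 * Worked example (sketch, 128K blocks): freeing off=100K len=200K
	 * zeroes the last 28K of block 0 and the first 44K of block 2
	 * above, leaving blkid=1 and nblks=1 here, so only block 1 is
	 * freed in full.
	 */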
/*
* Dirty all the indirect blocks in this range. Note that only
* the first and last indirect blocks can actually be written
* (if they were partially freed) -- they must be dirtied, even if
* they do not exist on disk yet. The interior blocks will
* be freed by free_children(), so they will not actually be written.
* Even though these interior blocks will not be written, we
* dirty them for two reasons:
*
* - It ensures that the indirect blocks remain in memory until
* syncing context. (They have already been prefetched by
* dmu_tx_hold_free(), so we don't have to worry about reading
* them serially here.)
*
* - The dirty space accounting will put pressure on the txg sync
* mechanism to begin syncing, and to delay transactions if there
* is a large amount of freeing. Even though these indirect
* blocks will not be written, we could need to write the same
* amount of space if we copy the freed BPs into deadlists.
*/
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
uint64_t first, last;
first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx);
if (trunc)
last = dn->dn_maxblkid >> epbs;
else
last = (blkid + nblks - 1) >> epbs;
if (last != first)
dnode_dirty_l1(dn, last, tx);
dnode_dirty_l1range(dn, first, last, tx);
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
for (uint64_t i = first + 1; i < last; i++) {
/*
* Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of
* level-0-equivalent bytes.
*/
uint64_t ibyte = i << shift;
int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0);
i = ibyte >> shift;
if (i >= last)
break;
/*
* Normally we should not see an error, either
* from dnode_next_offset() or dbuf_hold_level()
* (except for ESRCH from dnode_next_offset).
* If there is an i/o error, then when we read
* this block in syncing context, it will use
* ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
* to the "failmode" property. dnode_next_offset()
* doesn't have a flag to indicate MUSTSUCCEED.
*/
if (err != 0)
break;
dnode_dirty_l1(dn, i, tx);
}
rw_exit(&dn->dn_struct_rwlock);
}
done:
/*
* Add this range to the dnode range list.
* We will finish up this free operation in the syncing phase.
*/
mutex_enter(&dn->dn_mtx);
{
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] == NULL) {
dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0);
}
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
(u_longlong_t)blkid, (u_longlong_t)nblks,
(u_longlong_t)tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
dnode_setdirty(dn, tx);
}
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
int i;
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
int i;
if (blkid == DMU_BONUS_BLKID)
return (FALSE);
if (dn->dn_free_txg)
return (TRUE);
if (blkid == DMU_SPILL_BLKID)
return (dnode_spill_freed(dn));
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_free_ranges[i] != NULL &&
range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
uint64_t space;
dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
dn, dn->dn_phys,
(u_longlong_t)dn->dn_phys->dn_used,
(longlong_t)delta);
mutex_enter(&dn->dn_mtx);
space = DN_USED_BYTES(dn->dn_phys);
if (delta > 0) {
ASSERT3U(space + delta, >=, space); /* no overflow */
} else {
ASSERT3U(space, >=, -delta); /* no underflow */
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
dn->dn_phys->dn_used = space;
dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
}
mutex_exit(&dn->dn_mtx);
}
/*
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
* we find something (i.e., we don't return ESRCH) and then search back
* down the tree (narrow the search) until we reach our original search
* level.
*/
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
int lvl, uint64_t blkfill, uint64_t txg)
{
dmu_buf_impl_t *db = NULL;
void *data = NULL;
uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
uint64_t epb = 1ULL << epbs;
uint64_t minfill, maxfill;
boolean_t hole;
int i, inc, error, span;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
hole = ((flags & DNODE_FIND_HOLE) != 0);
inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
ASSERT(txg == 0 || !hole);
if (lvl == dn->dn_phys->dn_nlevels) {
error = 0;
epb = dn->dn_phys->dn_nblkptr;
data = dn->dn_phys->dn_blkptr;
} else {
uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
if (error) {
if (error != ENOENT)
return (error);
if (hole)
return (0);
/*
* This can only happen when we are searching up
* the block tree for data. We don't really need to
* adjust the offset, as we will just end up looking
* at the pointer to this block in its parent, and its
* going to be unallocated, so we will skip over it.
*/
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL,
DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (error) {
dbuf_rele(db, FTAG);
return (error);
}
data = db->db.db_data;
rw_enter(&db->db_rwlock, RW_READER);
}
if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
db->db_blkptr->blk_birth <= txg ||
BP_IS_HOLE(db->db_blkptr))) {
/*
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
ASSERT(dn->dn_type == DMU_OT_DNODE);
ASSERT(!(flags & DNODE_FIND_BACKWARDS));
for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
i < blkfill; i += dnp[i].dn_extra_slots + 1) {
if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
break;
}
if (i == blkfill)
error = SET_ERROR(ESRCH);
*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
(i << DNODE_SHIFT);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
span = (lvl - 1) * epbs + dn->dn_datablkshift;
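		/*
		 * span is log2 of the number of level-0 bytes covered by a
		 * single block pointer at this level.
		 */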
minfill = 0;
maxfill = blkfill << ((lvl - 1) * epbs);
if (hole)
maxfill--;
else
minfill++;
if (span >= 8 * sizeof (*offset)) {
/* This only happens on the highest indirection level */
ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
*offset = 0;
} else {
*offset = *offset >> span;
}
for (i = BF64_GET(*offset, 0, epbs);
i >= 0 && i < epb; i += inc) {
if (BP_GET_FILL(&bp[i]) >= minfill &&
BP_GET_FILL(&bp[i]) <= maxfill &&
(hole || bp[i].blk_birth > txg))
break;
if (inc > 0 || *offset > 0)
*offset += inc;
}
if (span >= 8 * sizeof (*offset)) {
*offset = start;
} else {
*offset = *offset << span;
}
if (inc < 0) {
/* traversing backwards; position offset at the end */
if (span < 8 * sizeof (*offset))
*offset = MIN(*offset + (1ULL << span) - 1,
start);
} else if (*offset < start) {
*offset = start;
}
if (i < 0 || i >= epb)
error = SET_ERROR(ESRCH);
}
if (db != NULL) {
rw_exit(&db->db_rwlock);
dbuf_rele(db, FTAG);
}
return (error);
}
/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
* DNODES_PER_BLOCK for the meta dnode, and some fraction of
* DNODES_PER_BLOCK when searching for sparse regions thereof.
*
* Examples:
*
* dnode_next_offset(dn, flags, offset, 1, 1, 0);
* Finds the next/previous hole/data in a file.
* Used in dmu_offset_next().
*
* dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
* Finds the next free/allocated dnode in an objset's meta-dnode.
* Only finds objects that have new contents since txg (i.e.
* bonus buffer changes and content removal are ignored).
* Used in dmu_object_next().
*
* dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
* Finds the next L2 meta-dnode bp that's at most 1/4 full.
* Used in dmu_object_alloc().
*/
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
uint64_t initial_offset = *offset;
int lvl, maxlvl;
int error = 0;
if (!(flags & DNODE_FIND_HAVELOCK))
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = SET_ERROR(ESRCH);
goto out;
}
if (dn->dn_datablkshift == 0) {
if (*offset < dn->dn_datablksz) {
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = SET_ERROR(ESRCH);
}
goto out;
}
maxlvl = dn->dn_phys->dn_nlevels;
for (lvl = minlvl; lvl <= maxlvl; lvl++) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
if (error != ESRCH)
break;
}
while (error == 0 && --lvl >= minlvl) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
}
/*
* There's always a "virtual hole" at the end of the object, even
* if all BP's which physically exist are non-holes.
*/
if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
error = 0;
}
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
return (error);
}
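/*
 * Editorial sketch (not part of the original source): one way a caller
 * might use dnode_next_offset(), mirroring the dmu_offset_next() example
 * in the comment above. The helper name is hypothetical and error
 * handling is elided; flags == 0 searches forward for data.
 */
#if 0
static int
example_next_data(dnode_t *dn, uint64_t *off)
{
/* minlvl = 1, blkfill = 1, txg = 0: next data region in a file */
return (dnode_next_offset(dn, 0, off, 1, 1, 0));
}
#endif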
#if defined(_KERNEL)
EXPORT_SYMBOL(dnode_hold);
EXPORT_SYMBOL(dnode_rele);
EXPORT_SYMBOL(dnode_set_nlevels);
EXPORT_SYMBOL(dnode_set_blksz);
EXPORT_SYMBOL(dnode_free_range);
EXPORT_SYMBOL(dnode_evict_dbufs);
EXPORT_SYMBOL(dnode_evict_bonus);
#endif
ZFS_MODULE_PARAM(zfs, zfs_, default_bs, INT, ZMOD_RW,
"Default dnode block shift");
ZFS_MODULE_PARAM(zfs, zfs_, default_ibs, INT, ZMOD_RW,
"Default dnode indirect block shift");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
index 181efd8ed69c..47c234f76c40 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
@@ -1,1116 +1,1116 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
/*
* Deadlist concurrency:
*
* Deadlists can only be modified from the syncing thread.
*
* Except for dsl_deadlist_insert(), it can only be modified with the
* dp_config_rwlock held with RW_WRITER.
*
* The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
* be called concurrently, from open context, with the dp_config_rwlock held
* with RW_READER.
*
* Therefore, we only need to provide locking between dsl_deadlist_insert() and
* the accessors, protecting:
* dl_phys->dl_used,comp,uncomp
* and protecting the dl_tree from being loaded.
* The locking is provided by dl_lock. Note that the bpobj_t provides
* its own locking, and dl_oldfmt is immutable.
*/
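/*
 * Editorial sketch (not part of the original source): the locking rules
 * above as seen from an open-context reader. The helper is hypothetical;
 * error handling is elided.
 */
#if 0
static void
example_deadlist_stats(dsl_pool_t *dp, dsl_deadlist_t *dl,
uint64_t *used, uint64_t *comp, uint64_t *uncomp)
{
/* Readers take the pool config lock; dl_lock is taken internally. */
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(dl, used, comp, uncomp);
dsl_pool_config_exit(dp, FTAG);
}
#endif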
/*
* Livelist Overview
* ================
*
* Livelists use the same 'deadlist_t' struct as deadlists and are also used
* to track blkptrs over the lifetime of a dataset. Livelists, however, belong
* to clones and track the blkptrs that are clone-specific (were born after
* the clone's creation). The exception is embedded block pointers which are
* not included in livelists because they do not need to be freed.
*
* When it comes time to delete the clone, the livelist provides a quick
* reference as to what needs to be freed. For this reason, livelists also track
* when clone-specific blkptrs are freed before deletion to prevent double
* frees. Each blkptr in a livelist is marked as a FREE or an ALLOC and the
* deletion algorithm iterates backwards over the livelist, matching
* FREE/ALLOC pairs and then freeing those ALLOCs which remain. Livelists
* are also updated in the case when blkptrs are remapped: the old version
* of the blkptr is cancelled out with a FREE and the new version is tracked
* with an ALLOC.
*
* To bound the amount of memory required for deletion, livelists over a
* certain size are spread over multiple entries. Entries are grouped by
* birth txg so we can be sure the ALLOC/FREE pair for a given blkptr will
* be in the same entry. This allows us to delete livelists incrementally
* over multiple syncs, one entry at a time.
*
* During the lifetime of the clone, livelists can get extremely large.
* Their size is managed by periodic condensing (preemptively cancelling out
* FREE/ALLOC pairs). Livelists are disabled when a clone is promoted or when
* the shared space between the clone and its origin is so small that it
* doesn't make sense to use livelists anymore.
*/
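/*
 * Editorial illustration (not part of the original source): given sub-
 * livelist contents ALLOC(A), ALLOC(B), FREE(B), the backwards sweep pairs
 * FREE(B) with ALLOC(B) and cancels both, leaving ALLOC(A); block A is then
 * the only block freed when the clone is deleted.
 */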
/*
* The threshold sublist size at which we create a new sub-livelist for the
* next txg. However, since blkptrs of the same transaction group must be in
* the same sub-list, the actual sublist size may exceed this. When picking the
* size we had to balance the fact that larger sublists mean fewer sublists
* (decreasing the cost of insertion) against the consideration that sublists
* will be loaded into memory and shouldn't take up an inordinate amount of
* space. We settled on ~500000 entries, corresponding to roughly 128M.
*/
uint64_t zfs_livelist_max_entries = 500000;
/*
* We can approximate how much of a performance gain a livelist will give us
* based on the percentage of blocks shared between the clone and its origin.
* 0 percent shared means that the clone has completely diverged and that the
* old method is maximally effective: every read from the block tree will
* result in lots of frees. Livelists give us gains when they track blocks
* scattered across the tree, when one read in the old method might only
* result in a few frees. Once the clone has been overwritten enough,
* writes are no longer sparse and we'll no longer get much of a benefit from
* tracking them with a livelist. We chose a lower limit of 75 percent shared
* (25 percent overwritten). This means that 1/4 of all block pointers will be
* freed (e.g. each read frees 256, out of a max of 1024) so we expect livelists
* to make deletion 4x faster. Once the amount of shared space drops below this
* threshold, the clone will revert to the old deletion method.
*/
int zfs_livelist_min_percent_shared = 75;
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
const dsl_deadlist_entry_t *dle1 = arg1;
const dsl_deadlist_entry_t *dle2 = arg2;
return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}
static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
const dsl_deadlist_cache_entry_t *dlce1 = arg1;
const dsl_deadlist_cache_entry_t *dlce2 = arg2;
return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
zap_cursor_t zc;
zap_attribute_t za;
int error;
ASSERT(MUTEX_HELD(&dl->dl_lock));
ASSERT(!dl->dl_oldfmt);
if (dl->dl_havecache) {
/*
* After loading the tree, the caller may modify the tree,
* e.g. to add or remove nodes, or to make a node no longer
* refer to the empty_bpobj. These changes would make the
* dl_cache incorrect. Therefore we discard the cache here,
* so that it can't become incorrect.
*/
dsl_deadlist_cache_entry_t *dlce;
void *cookie = NULL;
while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
!= NULL) {
kmem_free(dlce, sizeof (*dlce));
}
avl_destroy(&dl->dl_cache);
dl->dl_havecache = B_FALSE;
}
if (dl->dl_havetree)
return;
avl_create(&dl->dl_tree, dsl_deadlist_compare,
sizeof (dsl_deadlist_entry_t),
offsetof(dsl_deadlist_entry_t, dle_node));
for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle->dle_mintxg = zfs_strtonum(za.za_name, NULL);
/*
* Prefetch all the bpobj's so that we do that i/o
* in parallel. Then open them all in a second pass.
*/
dle->dle_bpobj.bpo_object = za.za_first_integer;
dmu_prefetch(dl->dl_os, dle->dle_bpobj.bpo_object,
0, 0, 0, ZIO_PRIORITY_SYNC_READ);
avl_add(&dl->dl_tree, dle);
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
dle->dle_bpobj.bpo_object));
}
dl->dl_havetree = B_TRUE;
}
/*
* Load only the non-empty bpobj's into the dl_cache. The cache is an analog
* of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP. It
* is used only for gathering space statistics. The dl_cache has two
* advantages over the dl_tree:
*
* 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
* mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj
* many times and to inquire about its (zero) space stats many times.
*
* 2. The dl_cache uses less memory than the dl_tree. We only need to load
* the dl_tree of snapshots when deleting a snapshot, after which we free the
* dl_tree with dsl_deadlist_discard_tree().
*/
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
zap_cursor_t zc;
zap_attribute_t za;
int error;
ASSERT(MUTEX_HELD(&dl->dl_lock));
ASSERT(!dl->dl_oldfmt);
if (dl->dl_havecache)
return;
uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;
avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
sizeof (dsl_deadlist_cache_entry_t),
offsetof(dsl_deadlist_cache_entry_t, dlce_node));
for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
if (za.za_first_integer == empty_bpobj)
continue;
dsl_deadlist_cache_entry_t *dlce =
kmem_zalloc(sizeof (*dlce), KM_SLEEP);
dlce->dlce_mintxg = zfs_strtonum(za.za_name, NULL);
/*
* Prefetch all the bpobj's so that we do that i/o
* in parallel. Then open them all in a second pass.
*/
dlce->dlce_bpobj = za.za_first_integer;
dmu_prefetch(dl->dl_os, dlce->dlce_bpobj,
0, 0, 0, ZIO_PRIORITY_SYNC_READ);
avl_add(&dl->dl_cache, dlce);
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
bpobj_t bpo;
VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));
VERIFY0(bpobj_space(&bpo,
&dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
bpobj_close(&bpo);
}
dl->dl_havecache = B_TRUE;
}
/*
* Discard the tree to save memory.
*/
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
mutex_enter(&dl->dl_lock);
if (!dl->dl_havetree) {
mutex_exit(&dl->dl_lock);
return;
}
dsl_deadlist_entry_t *dle;
void *cookie = NULL;
while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
}
avl_destroy(&dl->dl_tree);
dl->dl_havetree = B_FALSE;
mutex_exit(&dl->dl_lock);
}
void
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
dsl_deadlist_entry_t *dle;
ASSERT(dsl_deadlist_is_open(dl));
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
mutex_exit(&dl->dl_lock);
for (dle = avl_first(&dl->dl_tree); dle != NULL;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
if (func(args, dle) != 0)
break;
}
}
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
dmu_object_info_t doi;
ASSERT(!dsl_deadlist_is_open(dl));
mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
dl->dl_os = os;
dl->dl_object = object;
VERIFY0(dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
dmu_object_info_from_db(dl->dl_dbuf, &doi);
if (doi.doi_type == DMU_OT_BPOBJ) {
dmu_buf_rele(dl->dl_dbuf, dl);
dl->dl_dbuf = NULL;
dl->dl_oldfmt = B_TRUE;
VERIFY0(bpobj_open(&dl->dl_bpobj, os, object));
return;
}
dl->dl_oldfmt = B_FALSE;
dl->dl_phys = dl->dl_dbuf->db_data;
dl->dl_havetree = B_FALSE;
dl->dl_havecache = B_FALSE;
}
boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
return (dl->dl_os != NULL);
}
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
ASSERT(dsl_deadlist_is_open(dl));
mutex_destroy(&dl->dl_lock);
if (dl->dl_oldfmt) {
dl->dl_oldfmt = B_FALSE;
bpobj_close(&dl->dl_bpobj);
dl->dl_os = NULL;
dl->dl_object = 0;
return;
}
if (dl->dl_havetree) {
dsl_deadlist_entry_t *dle;
void *cookie = NULL;
while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
!= NULL) {
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
}
avl_destroy(&dl->dl_tree);
}
if (dl->dl_havecache) {
dsl_deadlist_cache_entry_t *dlce;
void *cookie = NULL;
while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
!= NULL) {
kmem_free(dlce, sizeof (*dlce));
}
avl_destroy(&dl->dl_cache);
}
dmu_buf_rele(dl->dl_dbuf, dl);
dl->dl_dbuf = NULL;
dl->dl_phys = NULL;
dl->dl_os = NULL;
dl->dl_object = 0;
}
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
sizeof (dsl_deadlist_phys_t), tx));
}
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
dmu_object_info_t doi;
zap_cursor_t zc;
zap_attribute_t za;
int error;
VERIFY0(dmu_object_info(os, dlobj, &doi));
if (doi.doi_type == DMU_OT_BPOBJ) {
bpobj_free(os, dlobj, tx);
return;
}
for (zap_cursor_init(&zc, os, dlobj);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t obj = za.za_first_integer;
if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
bpobj_decr_empty(os, tx);
else
bpobj_free(os, obj, tx);
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
VERIFY0(dmu_object_free(os, dlobj, tx));
}
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
ASSERT(MUTEX_HELD(&dl->dl_lock));
if (dle->dle_bpobj.bpo_object ==
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
bpobj_close(&dle->dle_bpobj);
bpobj_decr_empty(dl->dl_os, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
dle->dle_mintxg, obj, tx));
}
bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
uint64_t obj, dmu_tx_t *tx)
{
ASSERT(MUTEX_HELD(&dl->dl_lock));
if (dle->dle_bpobj.bpo_object !=
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
} else {
bpobj_close(&dle->dle_bpobj);
bpobj_decr_empty(dl->dl_os, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
dle->dle_mintxg, obj, tx));
}
}
/*
* Prefetch metadata required for dle_enqueue_subobj().
*/
static void
dle_prefetch_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
uint64_t obj)
{
if (dle->dle_bpobj.bpo_object !=
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj)
bpobj_prefetch_subobj(&dle->dle_bpobj, obj);
}
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
if (dl->dl_oldfmt) {
bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
return;
}
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
int sign = bp_freed ? -1 : +1;
dl->dl_phys->dl_used +=
sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);
dle_tofind.dle_mintxg = bp->blk_birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
else
dle = AVL_PREV(&dl->dl_tree, dle);
if (dle == NULL) {
zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
bp, (longlong_t)bp->blk_birth);
dle = avl_first(&dl->dl_tree);
}
ASSERT3P(dle, !=, NULL);
dle_enqueue(dl, dle, bp, bp_freed, tx);
mutex_exit(&dl->dl_lock);
}
int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, B_FALSE, tx);
return (0);
}
int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, B_TRUE, tx);
return (0);
}
/*
* Insert new key in deadlist, which must be > all current entries.
* mintxg is not inclusive.
*/
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
uint64_t obj;
dsl_deadlist_entry_t *dle;
if (dl->dl_oldfmt)
return;
dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle->dle_mintxg = mintxg;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
avl_add(&dl->dl_tree, dle);
VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
mintxg, obj, tx));
mutex_exit(&dl->dl_lock);
}
/*
* Remove this key, merging its entries into the previous key.
*/
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle, *dle_prev;
if (dl->dl_oldfmt)
return;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
ASSERT3P(dle, !=, NULL);
dle_prev = AVL_PREV(&dl->dl_tree, dle);
ASSERT3P(dle_prev, !=, NULL);
dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);
avl_remove(&dl->dl_tree, dle);
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
mutex_exit(&dl->dl_lock);
}
/*
* Remove a deadlist entry and all of its contents by removing the entry from
* the deadlist's avl tree, freeing the entry's bpobj and adjusting the
* deadlist's space accounting accordingly.
*/
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
uint64_t used, comp, uncomp;
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
objset_t *os = dl->dl_os;
if (dl->dl_oldfmt)
return;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
VERIFY3P(dle, !=, NULL);
avl_remove(&dl->dl_tree, dle);
VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
bpobj_decr_empty(os, tx);
} else {
bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
}
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
mutex_exit(&dl->dl_lock);
}
/*
* Clear out the contents of a deadlist_entry by freeing its bpobj,
* replacing it with an empty bpobj and adjusting the deadlist's
* space accounting
*/
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
dmu_tx_t *tx)
{
uint64_t new_obj, used, comp, uncomp;
objset_t *os = dl->dl_os;
mutex_enter(&dl->dl_lock);
VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
bpobj_decr_empty(os, tx);
else
bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
bpobj_close(&dle->dle_bpobj);
new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
new_obj, tx));
ASSERT(bpobj_is_empty(&dle->dle_bpobj));
mutex_exit(&dl->dl_lock);
}
/*
* Return the first entry in deadlist's avl tree
*/
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle = avl_first(&dl->dl_tree);
mutex_exit(&dl->dl_lock);
return (dle);
}
/*
* Return the last entry in deadlist's avl tree
*/
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle = avl_last(&dl->dl_tree);
mutex_exit(&dl->dl_lock);
return (dle);
}
/*
* Walk ds's snapshots to regenerate the ZAP & AVL.
*/
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
uint64_t mrs_obj, dmu_tx_t *tx)
{
dsl_deadlist_t dl = { 0 };
dsl_pool_t *dp = dmu_objset_pool(os);
dsl_deadlist_open(&dl, os, dlobj);
if (dl.dl_oldfmt) {
dsl_deadlist_close(&dl);
return;
}
while (mrs_obj != 0) {
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
dsl_deadlist_add_key(&dl,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_rele(ds, FTAG);
}
dsl_deadlist_close(&dl);
}
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
uint64_t mrs_obj, dmu_tx_t *tx)
{
dsl_deadlist_entry_t *dle;
uint64_t newobj;
newobj = dsl_deadlist_alloc(dl->dl_os, tx);
if (dl->dl_oldfmt) {
dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
return (newobj);
}
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
for (dle = avl_first(&dl->dl_tree); dle;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
uint64_t obj;
if (dle->dle_mintxg >= maxtxg)
break;
obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(zap_add_int_key(dl->dl_os, newobj,
dle->dle_mintxg, obj, tx));
}
mutex_exit(&dl->dl_lock);
return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
ASSERT(dsl_deadlist_is_open(dl));
if (dl->dl_oldfmt) {
VERIFY0(bpobj_space(&dl->dl_bpobj,
usedp, compp, uncompp));
return;
}
mutex_enter(&dl->dl_lock);
*usedp = dl->dl_phys->dl_used;
*compp = dl->dl_phys->dl_comp;
*uncompp = dl->dl_phys->dl_uncomp;
mutex_exit(&dl->dl_lock);
}
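/*
 * Editorial sketch (not part of the original source): typical lifecycle of
 * a consumer querying deadlist space statistics. The helper is
 * hypothetical; dlobj is assumed to be a valid deadlist object.
 */
#if 0
static void
example_deadlist_space(objset_t *os, uint64_t dlobj)
{
dsl_deadlist_t dl = { 0 };
uint64_t used, comp, uncomp;
dsl_deadlist_open(&dl, os, dlobj);
dsl_deadlist_space(&dl, &used, &comp, &uncomp);
dsl_deadlist_close(&dl);
}
#endif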
/*
* Return the space used in the range (mintxg, maxtxg].
* Includes maxtxg, does not include mintxg.
* mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
* UINT64_MAX).
*/
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
dsl_deadlist_cache_entry_t *dlce;
dsl_deadlist_cache_entry_t dlce_tofind;
avl_index_t where;
if (dl->dl_oldfmt) {
VERIFY0(bpobj_space_range(&dl->dl_bpobj,
mintxg, maxtxg, usedp, compp, uncompp));
return;
}
*usedp = *compp = *uncompp = 0;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_cache(dl);
dlce_tofind.dlce_mintxg = mintxg;
dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);
/*
* If this mintxg doesn't exist, it may be an empty_bpobj which
* is omitted from the sparse tree. Start at the next non-empty
* entry.
*/
if (dlce == NULL)
dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);
for (; dlce && dlce->dlce_mintxg < maxtxg;
dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
*usedp += dlce->dlce_bytes;
*compp += dlce->dlce_comp;
*uncompp += dlce->dlce_uncomp;
}
mutex_exit(&dl->dl_lock);
}
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
uint64_t used, comp, uncomp;
bpobj_t bpo;
ASSERT(MUTEX_HELD(&dl->dl_lock));
VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
bpobj_close(&bpo);
dsl_deadlist_load_tree(dl);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used += used;
dl->dl_phys->dl_comp += comp;
dl->dl_phys->dl_uncomp += uncomp;
dle_tofind.dle_mintxg = birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
dle_enqueue_subobj(dl, dle, obj, tx);
}
/*
* Prefetch metadata required for dsl_deadlist_insert_bpobj().
*/
static void
dsl_deadlist_prefetch_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
ASSERT(MUTEX_HELD(&dl->dl_lock));
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
dle_prefetch_subobj(dl, dle, obj);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, bp_freed, tx);
return (0);
}
/*
* Merge the deadlist pointed to by 'obj' into dl. obj will be left as
* an empty deadlist.
*/
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
zap_cursor_t zc, pzc;
zap_attribute_t *za, *pza;
dmu_buf_t *bonus;
dsl_deadlist_phys_t *dlp;
dmu_object_info_t doi;
int error, perror, i;
VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
if (doi.doi_type == DMU_OT_BPOBJ) {
bpobj_t bpo;
VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
bpobj_close(&bpo);
return;
}
za = kmem_alloc(sizeof (*za), KM_SLEEP);
pza = kmem_alloc(sizeof (*pza), KM_SLEEP);
mutex_enter(&dl->dl_lock);
/*
* Prefetch up to 128 deadlists first and then more as we progress.
* The limit is a balance between ARC use and diminishing returns.
*/
for (zap_cursor_init(&pzc, dl->dl_os, obj), i = 0;
(perror = zap_cursor_retrieve(&pzc, pza)) == 0 && i < 128;
zap_cursor_advance(&pzc), i++) {
dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
zfs_strtonum(pza->za_name, NULL));
}
for (zap_cursor_init(&zc, dl->dl_os, obj);
(error = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
- uint64_t mintxg = zfs_strtonum(za->za_name, NULL);
- dsl_deadlist_insert_bpobj(dl, za->za_first_integer, mintxg, tx);
- VERIFY0(zap_remove_int(dl->dl_os, obj, mintxg, tx));
+ dsl_deadlist_insert_bpobj(dl, za->za_first_integer,
+ zfs_strtonum(za->za_name, NULL), tx);
+ VERIFY0(zap_remove(dl->dl_os, obj, za->za_name, tx));
if (perror == 0) {
dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
zfs_strtonum(pza->za_name, NULL));
zap_cursor_advance(&pzc);
perror = zap_cursor_retrieve(&pzc, pza);
}
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
zap_cursor_fini(&pzc);
VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
dlp = bonus->db_data;
dmu_buf_will_dirty(bonus, tx);
memset(dlp, 0, sizeof (*dlp));
dmu_buf_rele(bonus, FTAG);
mutex_exit(&dl->dl_lock);
kmem_free(za, sizeof (*za));
kmem_free(pza, sizeof (*pza));
}
/*
* Remove entries on dl that are born > mintxg, and put them on the bpobj.
*/
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle, *pdle;
avl_index_t where;
int i;
ASSERT(!dl->dl_oldfmt);
mutex_enter(&dl->dl_lock);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
/*
* Prefetch up to 128 deadlists first and then more as we progress.
* The limit is a balance between ARC use and diminishing returns.
*/
for (pdle = dle, i = 0; pdle && i < 128; i++) {
bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
pdle = AVL_NEXT(&dl->dl_tree, pdle);
}
while (dle) {
uint64_t used, comp, uncomp;
dsl_deadlist_entry_t *dle_next;
bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
if (pdle) {
bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
pdle = AVL_NEXT(&dl->dl_tree, pdle);
}
VERIFY0(bpobj_space(&dle->dle_bpobj,
&used, &comp, &uncomp));
ASSERT3U(dl->dl_phys->dl_used, >=, used);
ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
dle->dle_mintxg, tx));
dle_next = AVL_NEXT(&dl->dl_tree, dle);
avl_remove(&dl->dl_tree, dle);
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
dle = dle_next;
}
mutex_exit(&dl->dl_lock);
}
typedef struct livelist_entry {
blkptr_t le_bp;
uint32_t le_refcnt;
avl_node_t le_node;
} livelist_entry_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev != r_dva0_vdev)
return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset == r_dva0_offset)
ASSERT3U(l->blk_birth, ==, r->blk_birth);
return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}
struct livelist_iter_arg {
avl_tree_t *avl;
bplist_t *to_free;
zthr_t *t;
};
/*
* Expects an AVL tree which is incrementally filled with FREE blkptrs
* and used to match up ALLOC/FREE pairs. ALLOC'd blkptrs without a
* corresponding FREE are stored in the supplied bplist.
*
* Note that multiple FREE and ALLOC entries for the same blkptr may
* be encountered when dedup is involved. For this reason we keep a
* refcount for all the FREE entries of each blkptr and ensure that
* each of those FREE entries has a corresponding ALLOC preceding it.
*/
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
struct livelist_iter_arg *lia = arg;
avl_tree_t *avl = lia->avl;
bplist_t *to_free = lia->to_free;
zthr_t *t = lia->t;
ASSERT(tx == NULL);
if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
return (SET_ERROR(EINTR));
livelist_entry_t node;
node.le_bp = *bp;
livelist_entry_t *found = avl_find(avl, &node, NULL);
if (bp_freed) {
if (found == NULL) {
/* first free entry for this blkptr */
livelist_entry_t *e =
kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
e->le_bp = *bp;
e->le_refcnt = 1;
avl_add(avl, e);
} else {
/* dedup block free */
ASSERT(BP_GET_DEDUP(bp));
ASSERT3U(BP_GET_CHECKSUM(bp), ==,
BP_GET_CHECKSUM(&found->le_bp));
ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
found->le_refcnt++;
}
} else {
if (found == NULL) {
/* block is currently marked as allocated */
bplist_append(to_free, bp);
} else {
/* alloc matches a free entry */
ASSERT3U(found->le_refcnt, !=, 0);
found->le_refcnt--;
if (found->le_refcnt == 0) {
/* all tracked free pairs have been matched */
avl_remove(avl, found);
kmem_free(found, sizeof (livelist_entry_t));
} else {
/*
* This is definitely a deduped blkptr so
* let's validate it.
*/
ASSERT(BP_GET_DEDUP(bp));
ASSERT3U(BP_GET_CHECKSUM(bp), ==,
BP_GET_CHECKSUM(&found->le_bp));
}
}
}
return (0);
}
/*
* Accepts a bpobj and a bplist. Will insert into the bplist the blkptrs
* which have an ALLOC entry but no matching FREE.
*/
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
uint64_t *size)
{
avl_tree_t avl;
avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
offsetof(livelist_entry_t, le_node));
/* process the sublist */
struct livelist_iter_arg arg = {
.avl = &avl,
.to_free = to_free,
.t = t
};
int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);
VERIFY(err != 0 || avl_numnodes(&avl) == 0);
void *cookie = NULL;
livelist_entry_t *le = NULL;
while ((le = avl_destroy_nodes(&avl, &cookie)) != NULL) {
kmem_free(le, sizeof (livelist_entry_t));
}
avl_destroy(&avl);
return (err);
}
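/*
 * Editorial sketch (not part of the original source): draining one
 * sub-livelist into a caller-owned bplist. The helper is hypothetical; the
 * caller creates the bplist beforehand and empties it (e.g. with
 * bplist_iterate()) afterwards. Passing a NULL zthr means the iteration is
 * never interrupted.
 */
#if 0
static int
example_collect_frees(bpobj_t *bpo, bplist_t *to_free, uint64_t *size)
{
return (dsl_process_sub_livelist(bpo, to_free, NULL, size));
}
#endif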
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, U64, ZMOD_RW,
"Size to start the next sub-livelist in a livelist");
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
"Threshold at which livelist is disabled");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 50428bff3ef4..34012db82dee 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1,5260 +1,5271 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2016 Gary Mills
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#include <sys/dbuf.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/*
* Grand theory statement on scan queue sorting
*
* Scanning is implemented by recursively traversing all indirection levels
* in an object and reading all blocks referenced from said objects. This
* results in us approximately traversing the object from lowest logical
* offset to the highest. For best performance, we would want the logical
* blocks to be physically contiguous. However, this is frequently not the
* case with pools given the allocation patterns of copy-on-write filesystems.
* So instead, we put the I/Os into a reordering queue and issue them in a
* way that will most benefit physical disks (LBA-order).
*
* Queue management:
*
* Ideally, we would want to scan all metadata and queue up all block I/O
* prior to starting to issue it, because that allows us to do an optimal
* sorting job. This can however consume large amounts of memory. Therefore
* we continuously monitor the size of the queues and constrain them to 5%
* (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
* limit, we clear out a few of the largest extents at the head of the queues
* to make room for more scanning. Hopefully, these extents will be fairly
* large and contiguous, allowing us to approach sequential I/O throughput
* even without a fully sorted tree.
*
* Metadata scanning takes place in dsl_scan_visit(), which is called from
* dsl_scan_sync() every spa_sync(). If we have either fully scanned all
* metadata on the pool, or we need to make room in memory because our
* queues are too large, dsl_scan_visit() is postponed and
* scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
* that metadata scanning and queued I/O issuing are mutually exclusive. This
* allows us to provide maximum sequential I/O throughput for the majority of
* I/O's issued since sequential I/O performance is significantly negatively
* impacted if it is interleaved with random I/O.
*
* Implementation Notes
*
* One side effect of the queued scanning algorithm is that the scanning code
* needs to be notified whenever a block is freed. This is needed to allow
* the scanning code to remove these I/Os from the issuing queue. Additionally,
* we do not attempt to queue gang blocks to be issued sequentially since this
* is very hard to do and would have an extremely limited performance benefit.
* Instead, we simply issue gang I/Os as soon as we find them using the legacy
* algorithm.
*
* Backwards compatibility
*
* This new algorithm is backwards compatible with the legacy on-disk data
* structures (and therefore does not require a new feature flag).
* Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
* will stop scanning metadata (in logical order) and wait for all outstanding
* sorted I/O to complete. Once this is done, we write out a checkpoint
* bookmark, indicating that we have scanned everything logically before it.
* If the pool is imported on a machine without the new sorting algorithm,
* the scan simply resumes from the last checkpoint using the legacy algorithm.
*/
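/*
 * Editorial note (not part of the original source): roughly how the memory
 * limit described above maps onto the tunables declared below; see
 * dsl_scan_should_clear() for the authoritative logic.
 *
 * hard limit ~= MAX(physmem / zfs_scan_mem_lim_fact, zfs_scan_mem_lim_min)
 * soft limit ~= MIN(hard limit / zfs_scan_mem_lim_soft_fact,
 * zfs_scan_mem_lim_soft_max)
 *
 * With the default factors of 20, this is ~5% of physmem for the hard
 * limit and ~0.25% (capped at 128M) for the soft limit.
 */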
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
const zbookmark_phys_t *);
static scan_cb_t dsl_scan_scrub_cb;
static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(spa_t *spa);
static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);
extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
/*
* 'zpool status' uses bytes processed per pass to report throughput and
* estimate time remaining. We define a pass to start when the scanning
* phase completes for a sequential resilver. Optionally, this value
* may be used to reset the pass statistics every N txgs to provide an
* estimated completion time based on currently observed performance.
*/
static uint_t zfs_scan_report_txgs = 0;
/*
* By default zfs will check to ensure it is not over the hard memory
* limit before each txg. If finer-grained control of this is needed
* this value can be set to 1 to enable checking before scanning each
* block.
*/
static int zfs_scan_strict_mem_lim = B_FALSE;
/*
* Maximum number of concurrently in-flight bytes per leaf vdev. We attempt
* to strike a balance here between keeping the vdev queues full of I/Os
* at all times and not overflowing the queues to cause long latency,
* which would cause long txg sync times. No matter what, we will not
* overload the drives with I/O, since that is protected by
* zfs_vdev_scrub_max_active.
*/
static uint64_t zfs_scan_vdev_limit = 16 << 20;
static uint_t zfs_scan_issue_strategy = 0;
/* don't queue & sort zios, go direct */
static int zfs_scan_legacy = B_FALSE;
static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
/* fraction of physmem */
static uint_t zfs_scan_mem_lim_fact = 20;
/* fraction of mem lim above */
static uint_t zfs_scan_mem_lim_soft_fact = 20;
/* minimum milliseconds to scrub per txg */
static uint_t zfs_scrub_min_time_ms = 1000;
/* minimum milliseconds to obsolete per txg */
static uint_t zfs_obsolete_min_time_ms = 500;
/* minimum milliseconds to free per txg */
static uint_t zfs_free_min_time_ms = 1000;
/* minimum milliseconds to resilver per txg */
static uint_t zfs_resilver_min_time_ms = 3000;
static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
static uint64_t zfs_max_async_dedup_frees = 100000;
/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
* Unfortunately, it is a bit difficult to determine exactly how long
* this will take since userspace will trigger fs mounts asynchronously
* and the kernel will create zvol minors asynchronously. As a result,
* the value provided here is a bit arbitrary, but represents a
* reasonable estimate of how many txgs it will take to finish fully
* importing a pool.
*/
#define SCAN_IMPORT_WAIT_TXGS 5
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
* Enable/disable the processing of the free_bpobj object.
*/
static int zfs_free_bpobj_enabled = 1;
/* Error blocks to be scrubbed in one txg. */
static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
NULL,
dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
uint64_t sds_dsobj;
uint64_t sds_txg;
avl_node_t sds_node;
} scan_ds_t;
/*
* This controls what conditions are placed on dsl_scan_sync_state():
* SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
* SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
* SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
* See dsl_scan_sync_state for details.
*/
typedef enum {
SYNC_OPTIONAL,
SYNC_MANDATORY,
SYNC_CACHED
} state_sync_type_t;
/*
* This struct represents the minimum information needed to reconstruct a
* zio for sequential scanning. This is useful because many of these will
* accumulate in the sequential IO queues before being issued, so saving
* memory matters here.
*/
typedef struct scan_io {
/* fields from blkptr_t */
uint64_t sio_blk_prop;
uint64_t sio_phys_birth;
uint64_t sio_birth;
zio_cksum_t sio_cksum;
uint32_t sio_nr_dvas;
/* fields from zio_t */
uint32_t sio_flags;
zbookmark_phys_t sio_zb;
/* members for queue sorting */
union {
avl_node_t sio_addr_node; /* link into issuing queue */
list_node_t sio_list_node; /* link for issuing to disk */
} sio_nodes;
/*
* There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
* depending on how many were in the original bp. Only the
* first DVA is really used for sorting and issuing purposes.
* The other DVAs (if provided) simply exist so that the zio
* layer can find additional copies to repair from in the
* event of an error. This array must go at the end of the
* struct to allow this for the variable number of elements.
*/
dva_t sio_dva[];
} scan_io_t;
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define SIO_GET_END_OFFSET(sio) \
(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define SIO_GET_MUSED(sio) \
(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
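/* For example, a sio carrying 2 DVAs uses sizeof (scan_io_t) + 2 * sizeof (dva_t) bytes. */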
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
zio_t *q_zio; /* scn_zio_root child for waiting on IO */
/* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr;
zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused;
uint64_t q_last_ext_addr;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
uint64_t q_inflight_bytes;
kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
/* per txg statistics */
uint64_t q_total_seg_size_this_txg;
uint64_t q_segs_this_txg;
uint64_t q_total_zio_size_this_txg;
uint64_t q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
zfs_refcount_t spc_refcnt; /* refcount for memory management */
dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
boolean_t spc_root; /* is this prefetch for an objset? */
uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */
} scan_prefetch_ctx_t;
/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
blkptr_t spic_bp; /* bp to prefetch */
zbookmark_phys_t spic_zb; /* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);
static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}
/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
ASSERT3U(nr_dvas, >, 0);
ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
void
scan_init(void)
{
/*
* This is used in ext_size_compare() to weight segments
* based on how sparse they are. This cannot be changed
* mid-scan and the tree comparison functions don't currently
* have a mechanism for passing additional context to the
* compare functions. Thus we store this value globally and
* we only allow it to be set at module initialization time
*/
fill_weight = zfs_scan_fill_weight;
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
char name[36];
(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
sio_cache[i] = kmem_cache_create(name,
(sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
0, NULL, NULL, NULL, NULL, NULL, 0);
}
}
void
scan_fini(void)
{
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
kmem_cache_destroy(sio_cache[i]);
}
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
return (scn->scn_phys.scn_state == DSS_SCANNING);
}
boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
return (dsl_scan_is_running(dp->dp_scan) &&
dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
bp->blk_fill = 1; /* we always only work with data pointers */
bp->blk_cksum = sio->sio_cksum;
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
sio->sio_blk_prop = bp->blk_prop;
sio->sio_phys_birth = bp->blk_phys_birth;
sio->sio_birth = bp->blk_birth;
sio->sio_cksum = bp->blk_cksum;
sio->sio_nr_dvas = BP_GET_NDVAS(bp);
/*
* Copy the DVAs to the sio. We need all copies of the block so
* that the self healing code can use the alternate copies if the
* first is corrupted. We want the DVA at index dva_i to be first
* in the sio since this is the primary one that we want to issue.
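* For example, with sio_nr_dvas = 3 and dva_i = 1, the DVAs are copied in
* the order 1, 2, 0.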
*/
for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
}
}
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
int err;
dsl_scan_t *scn;
spa_t *spa = dp->dp_spa;
uint64_t f;
scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
scn->scn_dp = dp;
/*
* It's possible that we're resuming a scan after a reboot so
* make sure that the scan_async_destroying flag is initialized
* appropriately.
*/
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
* Limits for the issuing phase are done per top-level vdev and
* are handled separately.
*/
scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
sizeof (scan_prefetch_issue_ctx_t),
offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_func", sizeof (uint64_t), 1, &f);
if (err == 0) {
/*
* There was an old-style scrub in progress. Restart a
* new-style scrub from the beginning.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("old-style scrub was in progress for %s; "
"restarting new-style scrub in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
/*
* Load the queue obj from the old location so that it
* can be freed by dsl_scan_done().
*/
(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);
if (err != 0 && err != ENOENT)
return (err);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
/*
* Detect if the pool contains the signature of #2094. If it
* does properly update the scn->scn_phys structure and notify
* the administrator by setting an errata for the pool.
*/
if (err == EOVERFLOW) {
uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
(23 * sizeof (uint64_t)));
err = zap_lookup(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
if (err == 0) {
uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
if (overflow & ~DSL_SCAN_FLAGS_MASK ||
scn->scn_async_destroying) {
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
return (EOVERFLOW);
}
memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
/* Required scrub already in progress. */
if (scn->scn_phys.scn_state == DSS_FINISHED ||
scn->scn_phys.scn_state == DSS_CANCELED)
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_SCRUB;
}
}
if (err == ENOENT)
return (0);
else if (err)
return (err);
/*
* We might be restarting after a reboot, so jump the issued
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
scn->scn_issued_before_pass = scn->scn_phys.scn_examined -
scn->scn_phys.scn_skipped;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
/*
* A new-type scrub was in progress on an old
* pool, and the pool was accessed by old
* software. Restart from the beginning, since
* the old software may have changed the pool in
* the meantime.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("new-style scrub for %s was modified "
"by old software; restarting in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
} else if (dsl_scan_resilvering(dp)) {
/*
* If a resilver is in progress and there are already
* errors, restart it instead of finishing this scan and
* then restarting it. If there haven't been any errors
* then remember that the incore DTL is valid.
*/
if (scn->scn_phys.scn_errors > 0) {
scn->scn_restart_txg = txg;
zfs_dbgmsg("resilver can't excise DTL_MISSING "
"when finished; restarting on %s in txg "
"%llu",
spa->spa_name,
(u_longlong_t)scn->scn_restart_txg);
} else {
/* it's safe to excise DTL when finished */
spa->spa_scrub_started = B_TRUE;
}
}
}
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
za.za_first_integer);
}
zap_cursor_fini(&zc);
}
spa_scan_stat_init(spa);
vdev_scan_stat_init(spa->spa_root_vdev);
return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
if (dp->dp_scan != NULL) {
dsl_scan_t *scn = dp->dp_scan;
if (scn->scn_taskq != NULL)
taskq_destroy(scn->scn_taskq);
scan_ds_queue_clear(scn);
avl_destroy(&scn->scn_queue);
scan_ds_prefetch_queue_clear(scn);
avl_destroy(&scn->scn_prefetch_queue);
kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
dp->dp_scan = NULL;
}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
return (scn->scn_restart_txg != 0 &&
scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
(spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
return (scn_phys->scn_state == DSS_SCANNING &&
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
{
dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;
return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
}
boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
{
return (dsl_errorscrubbing(scn->scn_dp) &&
scn->errorscrub_phys.dep_paused_flags);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
return (dsl_scan_scrubbing(scn->scn_dp) &&
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
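/*
* Persist the current error scrub state, including the serialized ZAP
* cursor, to the pool directory in the MOS.
*/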
static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
scn->errorscrub_phys.dep_cursor =
zap_cursor_serialize(&scn->errorscrub_cursor);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
&scn->errorscrub_phys, tx));
}
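/*
* Sync task that initializes the on-disk error scrub state; the error
* scrub walks the persistent error log (spa_errlog_last) from a
* freshly initialized cursor.
*/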
static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(!dsl_errorscrubbing(scn->scn_dp));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
scn->errorscrub_phys.dep_func = *funcp;
scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
scn->errorscrub_phys.dep_start_time = gethrestime_sec();
scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
scn->errorscrub_phys.dep_examined = 0;
scn->errorscrub_phys.dep_errors = 0;
scn->errorscrub_phys.dep_cursor = 0;
zap_cursor_init_serialized(&scn->errorscrub_cursor,
spa->spa_meta_objset, spa->spa_errlog_last,
scn->errorscrub_phys.dep_cursor);
vdev_config_dirty(spa->spa_root_vdev);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);
dsl_errorscrub_sync_state(scn, tx);
spa_history_log_internal(spa, "error scrub setup", tx,
"func=%u mintxg=%u maxtxg=%llu",
*funcp, 0, (u_longlong_t)tx->tx_txg);
}
static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
return (SET_ERROR(EBUSY));
}
if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
return (ECANCELED);
}
return (0);
}
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we may be running the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
* (scn_queues_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
* mandatory, we do a hard ASSERT to make sure that the queues are empty. The
* third possible state is a "cached" sync. This is done in response to:
* 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
* destroyed, so we wouldn't be able to restart scanning from it.
* 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
* superseded by a newer snapshot.
* 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
* swapped with its clone.
* In all cases, a cached sync simply rewrites the last record we've written,
* just slightly modified. For the modifications that are performed to the
* last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
* dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
*/
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
int i;
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
if (scn->scn_queues_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
if (q == NULL)
continue;
mutex_enter(&vd->vdev_scan_io_queue_lock);
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
NULL);
ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
if (scn->scn_phys.scn_queue_obj != 0)
scan_ds_queue_sync(scn, tx);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
zfs_dbgmsg("finish scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_FALSE;
scn->scn_last_checkpoint = ddi_get_lbolt();
} else if (sync_type == SYNC_CACHED) {
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys_cached, tx));
}
}
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(EBUSY));
return (0);
}
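/*
* Sync task that initializes the on-disk and in-core state for a new
* scrub or resilver and writes out the initial dsl_scan_phys_t.
*/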
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
/*
* If we are starting a fresh scrub, we erase the error scrub
* information from disk.
*/
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
dsl_errorscrub_sync_state(scn, tx);
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_phys.scn_max_txg = tx->tx_txg;
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
scn->scn_issued_before_pass = 0;
scn->scn_restart_txg = 0;
scn->scn_done_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
vdev_scan_stat_init(spa->spa_root_vdev);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* rewrite all disk labels */
vdev_config_dirty(spa->spa_root_vdev);
if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_START);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
}
spa->spa_scrub_started = B_TRUE;
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
/*
* When starting a resilver clear any existing rebuild state.
* This is required to prevent stale rebuild status from
* being reported when a rebuild is run, then a resilver and
* finally a scrub, in which case only the scrub status
* should be reported by 'zpool status'.
*/
if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
vdev_rebuild_clear_sync(
(void *)(uintptr_t)vd->vdev_id, tx);
}
}
}
/* back to the generic stuff */
if (zfs_scan_blkstats) {
if (dp->dp_blkstats == NULL) {
dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
}
memset(&dp->dp_blkstats->zab_type, 0,
sizeof (dp->dp_blkstats->zab_type));
} else {
if (dp->dp_blkstats) {
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
dp->dp_blkstats = NULL;
}
}
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
*funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
(u_longlong_t)scn->scn_phys.scn_max_txg);
}
/*
* Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
* error scrub or resilver. Can also be called to resume a paused scrub or
* error scrub.
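* For example, 'zpool scrub <pool>' reaches this function with
* func == POOL_SCAN_SCRUB, and 'zpool scrub -e <pool>' with
* func == POOL_SCAN_ERRORSCRUB.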
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
spa_vdev_state_enter(spa, SCL_NONE);
spa->spa_scrub_reopen = B_TRUE;
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
if (func == POOL_SCAN_RESILVER) {
dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
return (0);
}
if (func == POOL_SCAN_ERRORSCRUB) {
if (dsl_errorscrub_is_paused(dp->dp_scan)) {
/*
* got error scrub start cmd, resume paused error scrub.
*/
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_ERRORSCRUB_RESUME);
return (ECANCELED);
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
&func, 0, ZFS_SPACE_CHECK_RESERVED));
}
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (SET_ERROR(ECANCELED));
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
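/*
* Finish (or cancel) an error scrub: record the end time, rotate the
* error log, and log the outcome to pool history.
*/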
static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
if (complete) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
spa_history_log_internal(spa, "error scrub done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
} else {
spa_history_log_internal(spa, "error scrub canceled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
}
scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa->spa_scrub_active = B_FALSE;
spa_errlog_rotate(spa);
scn->errorscrub_phys.dep_end_time = gethrestime_sec();
zap_cursor_fini(&scn->errorscrub_cursor);
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_errorscrubbing(scn->scn_dp));
}
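/*
* Finish (or cancel) a scan: free the on-disk queue, reassess DTLs for
* a completed scrub/resilver, and log the outcome to pool history.
*/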
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
static const char *old_names[] = {
"scrub_bookmark",
"scrub_ddt_bookmark",
"scrub_ddt_class_max",
"scrub_queue",
"scrub_min_txg",
"scrub_max_txg",
"scrub_func",
"scrub_errors",
NULL
};
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int i;
/* Remove any remnants of an old-style scrub. */
for (i = 0; old_names[i]; i++) {
(void) zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
}
if (scn->scn_phys.scn_queue_obj != 0) {
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = 0;
}
scan_ds_queue_clear(scn);
scan_ds_prefetch_queue_clear(scn);
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
/*
* If we were "restarted" from a stopped state, don't bother
* with anything else.
*/
if (!dsl_scan_is_running(scn)) {
ASSERT(!scn->scn_is_sorted);
return;
}
if (scn->scn_is_sorted) {
scan_io_queues_destroy(scn);
scn->scn_is_sorted = B_FALSE;
if (scn->scn_taskq != NULL) {
taskq_destroy(scn->scn_taskq);
scn->scn_taskq = NULL;
}
}
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
/*
* If the scrub/resilver completed, update all DTLs to
* reflect this. Whether it succeeded or not, vacate
* all temporary scrub DTLs.
*
* As the scrub does not currently support traversing
* data that have been freed but are part of a checkpoint,
* we don't mark the scrub as done in the DTLs as faults
* may still exist in those vdevs.
*/
if (complete &&
!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
if (scn->scn_phys.scn_min_txg) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_FINISH);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_SCRUB_FINISH);
}
} else {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
0, B_TRUE, B_FALSE);
}
spa_errlog_rotate(spa);
/*
* Don't clear flag until after vdev_dtl_reassess to ensure that
* DTL_MISSING will get updated when possible.
*/
spa->spa_scrub_started = B_FALSE;
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
/*
* Clear any resilver_deferred flags in the config.
* If there are drives that need resilvering, kick
* off an asynchronous request to start resilver.
* vdev_clear_resilver_deferred() may update the config
* before the resilver can restart. In the event of
* a crash during this period, the spa loading code
* will find the drives that need to be resilvered
* and start the resilver then.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
(u_longlong_t)spa_approx_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/* Clear recent error events (i.e. duplicate events tracking) */
if (complete)
zfs_ereport_clear(spa, NULL);
}
scn->scn_phys.scn_end_time = gethrestime_sec();
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_scan_is_running(scn));
}
static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/*
* can't pause an error scrub when there is no in-progress
* error scrub.
*/
if (!dsl_errorscrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused error scrub */
if (dsl_errorscrub_is_paused(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
scn->errorscrub_phys.dep_paused_flags = B_TRUE;
dsl_errorscrub_sync_state(scn, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_errorscrub_is_paused(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the error scrub
* rate shown in the output of 'zpool status'.
*/
spa->spa_scan_pass_errorscrub_spent_paused +=
gethrestime_sec() -
spa->spa_scan_pass_errorscrub_pause;
spa->spa_scan_pass_errorscrub_pause = 0;
scn->errorscrub_phys.dep_paused_flags = B_FALSE;
zap_cursor_init_serialized(
&scn->errorscrub_cursor,
spa->spa_meta_objset, spa->spa_errlog_last,
scn->errorscrub_phys.dep_cursor);
dsl_errorscrub_sync_state(scn, tx);
}
}
}
static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
/* can't cancel an error scrub when there is none in progress */
if (!dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_errorscrub_done(scn, B_FALSE, tx);
dsl_errorscrub_sync_state(scn, tx);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
ESC_ZFS_ERRORSCRUB_ABORT);
}
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
if (dsl_errorscrubbing(dp)) {
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
if (!dsl_scan_scrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused scrub */
if (dsl_scan_is_paused_scrub(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* record when the pause began so the paused time can be accounted for */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
spa_notify_waiters(spa);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the scrub rate
* shown in the output of 'zpool status'
*/
spa->spa_scan_pass_scrub_spent_paused +=
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
}
/*
* Set scrub pause/resume state if it makes sense to do so
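* (e.g. 'zpool scrub -p' pauses a scrub, and a subsequent plain
* 'zpool scrub' resumes it).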
*/
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
if (dsl_errorscrubbing(dp)) {
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_pause_resume_check,
dsl_errorscrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
/* Start a new resilver, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
if (txg == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;
dmu_tx_commit(tx);
} else {
dp->dp_scan->scn_restart_txg = txg;
}
zfs_dbgmsg("restarting resilver for %s at txg=%llu",
dp->dp_spa->spa_name, (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
zio_free(dp->dp_spa, txg, bp);
}
void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
ASSERT(dsl_pool_sync_context(dp));
zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
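/*
* AVL comparator for the in-memory dataset work queue; entries are
* ordered by dataset object number.
*/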
static int
scan_ds_queue_compare(const void *a, const void *b)
{
const scan_ds_t *sds_a = a, *sds_b = b;
if (sds_a->sds_dsobj < sds_b->sds_dsobj)
return (-1);
if (sds_a->sds_dsobj == sds_b->sds_dsobj)
return (0);
return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
void *cookie = NULL;
scan_ds_t *sds;
while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
kmem_free(sds, sizeof (*sds));
}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
if (sds != NULL && txg != NULL)
*txg = sds->sds_txg;
return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
scan_ds_t *sds;
avl_index_t where;
sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
sds->sds_dsobj = dsobj;
sds->sds_txg = txg;
VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
VERIFY(sds != NULL);
avl_remove(&scn->scn_queue, sds);
kmem_free(sds, sizeof (*sds));
}
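/*
* Rewrite the on-disk ZAP copy of the dataset work queue from the
* in-memory AVL tree.
*/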
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
ASSERT0(scn->scn_queues_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
DMU_OT_NONE, 0, tx);
for (scan_ds_t *sds = avl_first(&scn->scn_queue);
sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
VERIFY0(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
sds->sds_txg, tx));
}
}
/*
* Computes the memory limit state that we're currently in. A sorted scan
* needs quite a bit of memory to hold the sorting queue, so we need to
* reasonably constrain the size so it doesn't impact overall system
* performance. We compute two limits:
* 1) Hard memory limit: if the amount of memory used by the sorting
* queues on a pool gets above this value, we stop the metadata
* scanning portion and start issuing the queued up and sorted
* I/Os to reduce memory usage.
* This limit is calculated as a fraction of physmem (by default 5%).
* We constrain the lower bound of the hard limit to an absolute
* minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
* the upper bound to 5% of the total pool size - no chance we'll
* ever need that much memory, but just to keep the value in check.
* 2) Soft memory limit: once we hit the hard memory limit, we start
* issuing I/O to reduce queue memory usage, but we don't want to
* completely empty out the queues, since we might be able to find I/Os
* that will fill in the gaps of our non-sequential IOs at some point
* in the future. So we stop the issuing of I/Os once the amount of
* memory used drops below the soft limit (at which point we stop issuing
* I/O and start scanning metadata again).
*
* This limit is calculated by subtracting a fraction of the hard
* limit from the hard limit. By default this fraction is 5%, so
* the soft limit is 95% of the hard limit. We cap the size of the
* difference between the hard and soft limits at an absolute
* maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
* sufficient to not cause too frequent switching between the
* metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
* worth of queues is about 1.2 GiB of on-pool data, so scanning
* that should take at least a decent fraction of a second).
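*
* As an illustrative example with the default tunables, on a machine
* with 16 GiB of physical memory and a large pool the hard limit is
* max(16 GiB / 20, 16 MiB) = ~819 MiB (the 5% of pool size cap only
* binds for pools with less than ~16 GiB allocated), and the soft
* limit is 819 MiB - min(819 MiB / 20, 128 MiB) = ~778 MiB.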
*/
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
uint64_t alloc, mlim_hard, mlim_soft, mused;
alloc = metaslab_class_get_alloc(spa_normal_class(spa));
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
zfs_scan_mem_lim_min);
mlim_hard = MIN(mlim_hard, alloc / 20);
mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
zfs_scan_mem_lim_soft_max);
mused = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
dsl_scan_io_queue_t *queue;
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
/*
* # of extents in exts_by_addr = # in exts_by_size.
* B-tree efficiency is ~75%, but can be as low as 50%.
*/
mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
3 / 2) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
ASSERT0(scn->scn_queues_pending);
/*
* If we are above our hard limit, we need to clear out memory.
* If we are below our soft limit, we need to accumulate sequential IOs.
* Otherwise, we should keep doing whatever we are currently doing.
*/
if (mused >= mlim_hard)
return (B_TRUE);
else if (mused < mlim_soft)
return (B_FALSE);
else
return (scn->scn_clearing);
}
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
if (scn->scn_suspending)
return (B_TRUE); /* we're already suspending */
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 and objset blocks. */
if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
return (B_FALSE);
/*
* We suspend if:
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
* or
* - the scan queue has reached its memory use limit
*/
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
(zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
dprintf("suspending at first available bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
zb->zb_objset, 0, 0, 0);
} else if (zb != NULL) {
dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
scn->scn_phys.scn_bookmark = *zb;
} else {
#ifdef ZFS_DEBUG
dsl_scan_phys_t *scnp = &scn->scn_phys;
dprintf("suspending at at DDT bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
}
scn->scn_suspending = B_TRUE;
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/*
* We suspend if:
* - we have scrubbed for at least the minimum time (default 1 sec
* for error scrub), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
*/
uint64_t curr_time_ns = gethrtime();
uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int mintime = zfs_scrub_min_time_ms;
if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
(txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa)) {
if (zb) {
dprintf("error scrub suspending at bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
}
return (B_TRUE);
}
return (B_FALSE);
}
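/*
* Context passed through zil_parse() to the ZIL scan callbacks below.
*/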
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
} zil_scan_arg_t;
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
(void) zilog;
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* One block ("stubby") may have been allocated a long time ago; we
* want to visit that one because it has been allocated
* (on-disk) even if it hasn't been claimed (even though for
* scrub there's nothing to do to it).
*/
if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
(void) zilog;
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
const lr_write_t *lr = (const lr_write_t *)lrc;
const blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) ||
bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* birth can be < claim_txg if this record's txg is
* already txg sync'ed (but this log block contains
* other records that are not synced)
*/
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
ASSERT(spa_writeable(dp->dp_spa));
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0)
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg, B_FALSE);
zil_free(zilog);
}
/*
* We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
* here is to sort the AVL tree by the order each block will be needed.
*/
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
return (zbookmark_compare(spc_a->spc_datablkszsec,
spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
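/*
* Drop a hold on a prefetch context, freeing it once the last
* reference is released.
*/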
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
{
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
{
scan_prefetch_ctx_t *spc;
spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
zfs_refcount_create(&spc->spc_refcnt);
zfs_refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {
spc->spc_datablkszsec = dnp->dn_datablkszsec;
spc->spc_indblkshift = dnp->dn_indblkshift;
spc->spc_root = B_FALSE;
} else {
spc->spc_datablkszsec = 0;
spc->spc_indblkshift = 0;
spc->spc_root = B_TRUE;
}
return (spc);
}
static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
{
zfs_refcount_add(&spc->spc_refcnt, tag);
}
static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
void *cookie = NULL;
scan_prefetch_issue_ctx_t *spic = NULL;
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
&cookie)) != NULL) {
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
const zbookmark_phys_t *zb)
{
zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
dnode_phys_t tmp_dnp;
dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
if (zb->zb_objset != last_zb->zb_objset)
return (B_TRUE);
if ((int64_t)zb->zb_object < 0)
return (B_FALSE);
tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
if (zbookmark_subtree_completed(dnp, zb, last_zb))
return (B_TRUE);
return (B_FALSE);
}
static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
avl_index_t idx;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
return;
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET))
return;
if (dsl_scan_check_prefetch_resume(spc, zb))
return;
scan_prefetch_ctx_add_ref(spc, scn);
spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
spic->spic_spc = spc;
spic->spic_bp = *bp;
spic->spic_zb = *zb;
/*
* Add the IO to the queue of blocks to prefetch. This allows us to
* prioritize blocks that we will need first for the main traversal
* thread.
*/
mutex_enter(&spa->spa_scrub_lock);
if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
/* this block is already queued for prefetch */
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
scan_prefetch_ctx_rele(spc, scn);
mutex_exit(&spa->spa_scrub_lock);
return;
}
avl_insert(&scn->scn_prefetch_queue, spic, idx);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
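/*
* Queue prefetches for every block pointer in a dnode, including the
* spill block if one is present.
*/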
static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int i;
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
return;
SET_BOOKMARK(&zb, objset, object, 0, 0);
spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
for (i = 0; i < dnp->dn_nblkptr; i++) {
zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
zb.zb_blkid = i;
dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zb.zb_level = 0;
zb.zb_blkid = DMU_SPILL_BLKID;
dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
}
scan_prefetch_ctx_rele(spc, FTAG);
}
static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
(void) zio;
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
/* broadcast that the IO has completed for rate limiting purposes */
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
/* if there was an error or we are done prefetching, just cleanup */
if (buf == NULL || scn->scn_prefetch_stop)
goto out;
if (BP_GET_LEVEL(bp) > 0) {
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t czb;
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1, zb->zb_blkid * epb + i);
dsl_scan_prefetch(spc, cbp, &czb);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_prefetch_dnode(scn, cdnp,
zb->zb_objset, zb->zb_blkid * epb + i);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
objset_phys_t *osp = buf->b_data;
dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
+ if (OBJSET_BUF_HAS_PROJECTUSED(buf)) {
+ dsl_scan_prefetch_dnode(scn,
+ &osp->os_projectused_dnode, zb->zb_objset,
+ DMU_PROJECTUSED_OBJECT);
+ }
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
dsl_scan_prefetch_dnode(scn,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
out:
if (buf != NULL)
arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
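/*
* Dedicated prefetch thread: issues the queued prefetch I/Os in
* bookmark order, throttled so that the bytes in flight stay below
* scn_maxinflight_bytes, until asked to stop.
*/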
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_t *scn = arg;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
/* loop until we are told to stop */
while (!scn->scn_prefetch_stop) {
arc_flags_t flags = ARC_FLAG_NOWAIT |
ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
mutex_enter(&spa->spa_scrub_lock);
/*
* Wait until we have an IO to issue and are not above our
* maximum in flight limit.
*/
while (!scn->scn_prefetch_stop &&
(avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
}
/* recheck if we should stop since we waited for the cv */
if (scn->scn_prefetch_stop) {
mutex_exit(&spa->spa_scrub_lock);
break;
}
/* remove the prefetch IO from the tree */
spic = avl_first(&scn->scn_prefetch_queue);
spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
avl_remove(&scn->scn_prefetch_queue, spic);
mutex_exit(&spa->spa_scrub_lock);
if (BP_IS_PROTECTED(&spic->spic_bp)) {
ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
zio_flags |= ZIO_FLAG_RAW;
}
+ /* We don't need the data L1 buffer since we do not prefetch L0. */
+ blkptr_t *bp = &spic->spic_bp;
+ if (BP_GET_LEVEL(bp) == 1 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
+ BP_GET_TYPE(bp) != DMU_OT_OBJSET)
+ flags |= ARC_FLAG_NO_BUF;
+
/* issue the prefetch asynchronously */
- (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
- &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
- ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
+ (void) arc_read(scn->scn_zio_root, spa, bp,
+ dsl_scan_prefetch_cb, spic->spic_spc, ZIO_PRIORITY_SCRUB,
+ zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT(scn->scn_prefetch_stop);
/* free any prefetches we didn't get to complete */
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
avl_remove(&scn->scn_prefetch_queue, spic);
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
/*
* We never skip over user/group accounting objects (obj<0)
*/
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
(int64_t)zb->zb_object >= 0) {
/*
* If we already visited this bp & everything below (in
* a prior txg sync), don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb,
&scn->scn_phys.scn_bookmark))
return (B_TRUE);
/*
* If we found the block we're trying to resume from, or
* we went past it, zero it out to indicate that it's OK
* to start checking for suspending again.
*/
if (zbookmark_subtree_tbd(dnp, zb,
&scn->scn_phys.scn_bookmark)) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
}
static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
/*
* Recurse into the blocks referenced by bp: indirect, dnode, and
* objset blocks. Return nonzero on i/o error.
*/
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
ASSERT(!BP_IS_REDACTED(bp));
/*
* There is an unlikely case of encountering dnodes with contradicting
* dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flags in files created or
* modified before commit 4254acb was merged. As it is not possible
* to know which of the two is correct, report an error.
*/
if (dnp != NULL &&
dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EINVAL));
}
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
dsl_scan_visitbp(cbp, &czb, dnp,
ds, scn, ostype, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
arc_buf_t *buf;
if (BP_IS_PROTECTED(bp)) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
zio_flags |= ZIO_FLAG_RAW;
}
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_visitdnode(scn, ds, ostype,
cdnp, zb->zb_blkid * epb + i, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
osp = buf->b_data;
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
* We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_userused_dnode,
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
} else if (!zfs_blkptr_verify(spa, bp,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
/*
* Sanity check the block pointer contents; this is handled
* by arc_read() for the cases above.
*/
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EINVAL));
}
return (0);
}
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
dmu_objset_type_t ostype, dnode_phys_t *dnp,
uint64_t object, dmu_tx_t *tx)
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
dnp->dn_nlevels - 1, j);
dsl_scan_visitbp(&dnp->dn_blkptr[j],
&czb, dnp, ds, scn, ostype, tx);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
0, DMU_SPILL_BLKID);
dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
&czb, dnp, ds, scn, ostype, tx);
}
}
/*
* The arguments are in this order because mdb can only print the
* first 5; we want them to be useful.
*/
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
blkptr_t *bp_toread = NULL;
if (dsl_scan_check_suspend(scn, zb))
return;
if (dsl_scan_check_resume(scn, dnp, zb))
return;
scn->scn_visited_this_txg++;
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
}
if (BP_IS_REDACTED(bp)) {
ASSERT(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
return;
}
/*
* Check if this block contradicts any filesystem flags.
*/
spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
if (f != SPA_FEATURE_NONE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
*bp_toread = *bp;
if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
goto out;
/*
* If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
goto out;
}
/*
* If this block is from the future (after cur_max_txg), then we
* are doing this on behalf of a deleted snapshot, and we will
* revisit the future block on the next pass of this dataset.
* Don't scan it now unless we need to because something
* under it was modified.
*/
if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
goto out;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
out:
kmem_free(bp_toread, sizeof (blkptr_t));
}
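/*
* Visit the root block pointer of a dataset (or of the MOS when ds is
* NULL): seed the prefetcher and kick off the recursive traversal from
* the root bookmark.
*/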
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
dmu_tx_t *tx)
{
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
SET_BOOKMARK(&scn->scn_prefetch_bookmark,
zb.zb_objset, 0, 0, 0);
} else {
scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
}
scn->scn_objsets_visited_this_txg++;
spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
dsl_scan_prefetch(spc, bp, &zb);
scan_prefetch_ctx_rele(spc, FTAG);
dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
dprintf_ds(ds, "finished scan%s", "");
}
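/*
* Redirect a scan bookmark away from a dataset that is being
* destroyed: for a snapshot we move on to the next snapshot in line,
* otherwise we mark the objset as destroyed.
*/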
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
if (ds->ds_is_snapshot) {
/*
* Note:
* - scn_cur_{min,max}_txg stays the same.
* - Setting the flag is not really necessary if
* scn_cur_max_txg == scn_max_txg, because there
* is nothing after this snapshot that we care
* about. However, we set it anyway and then
* ignore it when we retraverse it in
* dsl_scan_visitds().
*/
scn_phys->scn_bookmark.zb_objset =
dsl_dataset_phys(ds)->ds_next_snap_obj;
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
} else {
SET_BOOKMARK(&scn_phys->scn_bookmark,
ZB_DESTROYED_OBJSET, 0, 0, 0);
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset bookmark to -1,0,0,0",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name);
}
}
}
/*
* Invoked when a dataset is destroyed. We need to make sure that:
*
* 1) If it is the dataset currently being scanned, we write
* a new dsl_scan_phys_t and mark the objset reference in it
* as destroyed.
* 2) Remove it from the work queue, if it was present.
*
* If the dataset was actually a snapshot, instead of marking the dataset
* as destroyed, we instead substitute the next snapshot in line.
*/
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_destroyed_scn_phys(ds, &scn->scn_phys);
ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
if (ds->ds_is_snapshot)
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
if (ds->ds_is_snapshot) {
/*
* We keep the same mintxg; it could be >
* ds_creation_txg if the previous snapshot was
* deleted too.
*/
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
mintxg, tx) == 0);
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
} else {
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"removing",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name);
}
}
/*
* dsl_scan_sync() should be called after this, and should sync
* out our changed state, but just to be safe, do it here.
*/
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds->ds_object) {
scn_bookmark->zb_objset =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
}
/*
* Called when a dataset is snapshotted. If we were currently traversing
* this snapshot, we reset our bookmark to point at the newly created
* snapshot. We also modify our work queue to remove the old snapshot and
* replace with the new one.
*/
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds1->ds_object) {
scn_bookmark->zb_objset = ds2->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds1->ds_object,
ds1->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (scn_bookmark->zb_objset == ds2->ds_object) {
scn_bookmark->zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds2->ds_object,
ds2->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
}
/*
* Called when an origin dataset and its clone are swapped. If we were
* currently traversing the dataset, we need to switch to traversing the
* newly promoted clone.
*/
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds1->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg1, mintxg2;
boolean_t ds1_queued, ds2_queued;
if (!dsl_scan_is_running(scn))
return;
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
/*
* Handle the in-memory scan queue.
*/
ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* The swapping code below would not handle this case correctly,
* since we can't insert ds2 if it is already there. That's
* because scan_ds_queue_insert() prohibits a duplicate insert
* and panics.
*/
} else if (ds1_queued) {
scan_ds_queue_remove(scn, ds1->ds_object);
scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
} else if (ds2_queued) {
scan_ds_queue_remove(scn, ds2->ds_object);
scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
}
/*
* Handle the on-disk scan queue.
* The on-disk state is an out-of-date version of the in-memory state,
* so the in-memory and on-disk values for ds1_queued and ds2_queued may
* be different. Therefore we need to apply the swap logic to the
* on-disk state independently of the in-memory state.
*/
ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* Alternatively, we could check for EEXIST from
* zap_add_int_key() and back out to the original state, but
* that would be more work than checking for this case upfront.
*/
} else if (ds1_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds1->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (ds2_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds2->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
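/*
 * dmu_objset_find_dp() callback. If the dataset is a clone of the origin
 * snapshot identified by *arg, walk back through its snapshot chain to the
 * snapshot created immediately after the origin and enqueue that snapshot
 * for scanning.
 */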
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
uint64_t originobj = *(uint64_t *)arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
return (0);
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (err)
return (err);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
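/*
 * Scan a single dataset: traverse its ZIL (for heads only) and its root
 * block pointer, then enqueue its next snapshot and any clones so that
 * the traversal proceeds in creation-txg order.
 */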
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (scn->scn_phys.scn_cur_min_txg >=
scn->scn_phys.scn_max_txg) {
/*
* This can happen if this snapshot was created after the
* scan started, and we already completed a previous snapshot
* that was created after the scan started. This snapshot
* only references blocks with:
*
* birth < our ds_creation_txg
*     cur_min_txg is no less than ds_creation_txg.
*     We have already visited these blocks.
* or
* birth > scn_max_txg
*     The scan requested not to visit these blocks.
*
* Subsequent snapshots (and clones) can reference our
* blocks, or blocks with even higher birth times.
* Therefore we do not need to visit them either,
* so we do not add them to the work queue.
*
* Note that checking for cur_min_txg >= cur_max_txg
* is not sufficient, because in that case we may need to
* visit subsequent snapshots. This happens when min_txg > 0,
* which raises cur_min_txg. In this case we will visit
* this dataset but skip all of its blocks, because the
* rootbp's birth time is < cur_min_txg. Then we will
* add the next snapshots/clones to the work queue.
*/
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
"cur_min_txg (%llu) >= max_txg (%llu)",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_max_txg);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
goto out;
}
/*
* Only the ZIL in the head (non-snapshot) is valid. Even though
* snapshots can have ZIL block pointers (which may be the same
* BP as in the head), they must be ignored. In addition, $ORIGIN
* doesn't have an objset (i.e. its ds_bp is a hole) so we don't
* need to look for a ZIL in it either. So we traverse the ZIL here,
* rather than in scan_recurse(), because the regular snapshot
* block-sharing rules don't apply to it.
*/
if (!dsl_dataset_is_snapshot(ds) &&
(dp->dp_origin_snap == NULL ||
ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
objset_t *os;
if (dmu_objset_from_ds(ds, &os) != 0) {
goto out;
}
dsl_scan_zil(dp, &os->os_zil_header);
}
/*
* Iterate over the bps in this ds.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_cur_max_txg,
(int)scn->scn_suspending);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
if (scn->scn_suspending)
goto out;
/*
* We've finished this pass over this dataset.
*/
/*
* If we did not completely visit this dataset, do another pass.
*/
if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
zfs_dbgmsg("incomplete pass on %s; visiting again",
dp->dp_spa->spa_name);
scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
scan_ds_queue_insert(scn, ds->ds_object,
scn->scn_phys.scn_cur_max_txg);
goto out;
}
/*
* Add descendant datasets to work queue.
*/
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj,
dsl_dataset_phys(ds)->ds_creation_txg);
}
if (dsl_dataset_phys(ds)->ds_num_children > 1) {
boolean_t usenext = B_FALSE;
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count;
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
int err = zap_count(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
if (err == 0 &&
count == dsl_dataset_phys(ds)->ds_num_children - 1)
usenext = B_TRUE;
}
if (usenext) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
dsl_dataset_phys(ds)->ds_creation_txg);
}
zap_cursor_fini(&zc);
} else {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_clones_cb, &ds->ds_object,
DS_FIND_CHILDREN));
}
}
out:
dsl_dataset_rele(ds, FTAG);
}
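/*
 * dmu_objset_find_dp() callback used for pools that predate
 * SPA_VERSION_DSL_SCRUB: walk from the head dataset back to its oldest
 * non-clone snapshot and enqueue that as the starting point for the scan.
 */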
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
(void) arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(prev, FTAG);
return (0);
}
dsl_dataset_rele(ds, FTAG);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx)
{
(void) tx;
const ddt_key_t *ddk = &dde->dde_key;
ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
if (!dsl_scan_is_running(scn))
return;
/*
* This function is special because it is the only thing
* that can add scan_io_t's to the vdev scan queues from
* outside dsl_scan_sync(). For the most part this is ok
* as long as it is called from within syncing context.
* However, dsl_scan_sync() expects that no new sio's will
* be added between when all the work for a scan is done
* and the next txg when the scan is actually marked as
* completed. This check ensures we do not issue new sio's
* during this period.
*/
if (scn->scn_done_txg != 0)
return;
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue;
ddt_bp_create(checksum, ddk, ddp, &bp);
scn->scn_visited_this_txg++;
scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
}
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (enum ddt_class)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_entry_t dde = {{{{0}}}};
int error;
uint64_t n = 0;
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
break;
dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
(longlong_t)ddb->ddb_class,
(longlong_t)ddb->ddb_type,
(longlong_t)ddb->ddb_checksum,
(longlong_t)ddb->ddb_cursor);
/* There should be no pending changes to the dedup table */
ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
ASSERT(avl_first(&ddt->ddt_tree) == NULL);
dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
n++;
if (dsl_scan_check_suspend(scn, NULL))
break;
}
zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
"suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
(int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
ASSERT(error == 0 || error == ENOENT);
ASSERT(error != ENOENT ||
ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
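/*
 * Return the effective max txg for scanning this dataset. A snapshot
 * cannot reference blocks born after its creation, so clamp the scan's
 * max txg to the snapshot's creation txg.
 */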
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
if (ds->ds_is_snapshot)
return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
return (smt);
}
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
scan_ds_t *sds;
dsl_pool_t *dp = scn->scn_dp;
if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
scn->scn_phys.scn_ddt_class_max) {
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_ddt(scn, tx);
if (scn->scn_suspending)
return;
}
if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
/* First do the MOS & ORIGIN */
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
if (scn->scn_suspending)
return;
if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_cb, NULL, DS_FIND_CHILDREN));
} else {
dsl_scan_visitds(scn,
dp->dp_origin_snap->ds_object, tx);
}
ASSERT(!scn->scn_suspending);
} else if (scn->scn_phys.scn_bookmark.zb_objset !=
ZB_DESTROYED_OBJSET) {
uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
/*
* If we were suspended, continue from here. Note if the
* ds we were suspended on was deleted, the zb_objset may
* be -1, so we will skip this and find a new objset
* below.
*/
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/*
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
* persistent zap-object-as-queue happen only at checkpoints.
*/
while ((sds = avl_first(&scn->scn_queue)) != NULL) {
dsl_dataset_t *ds;
uint64_t dsobj = sds->sds_dsobj;
uint64_t txg = sds->sds_txg;
/* dequeue and free the ds from the queue */
scan_ds_queue_remove(scn, dsobj);
sds = NULL;
/* set up min / max txg */
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (txg != 0) {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg, txg);
} else {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
}
scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
dsl_dataset_rele(ds, FTAG);
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/* No more objsets to fetch, we're done */
scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
ASSERT0(scn->scn_suspending);
}
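/*
 * Count the data disks in the pool: skip log, spare and l2cache vdevs
 * and subtract each top-level vdev's parity disks. The result is used
 * to scale the pool-wide in-flight scan byte limit.
 */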
static uint64_t
dsl_scan_count_data_disks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
continue;
leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
}
return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
int i;
uint64_t cur_size = 0;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
}
q->q_total_zio_size_this_txg += cur_size;
q->q_zios_this_txg++;
}
static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
uint64_t end)
{
q->q_total_seg_size_this_txg += end - start;
q->q_segs_this_txg++;
}
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
/* See comment in dsl_scan_check_suspend() */
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
/*
* Given a list of scan_io_t's in io_list, this issues the I/Os out to
* disk. This consumes the io_list and frees the scan_io_t's. This is
* called when emptying queues, either when we're up against the memory
* limit or when we have finished scanning. Returns B_TRUE if we stopped
* processing the list before we finished. Any sios that were not issued
* will remain in the io_list.
*/
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;
if (scan_io_queue_check_suspend(scn)) {
suspended = B_TRUE;
break;
}
sio2bp(sio, &bp);
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
sio_free(sio);
}
return (suspended);
}
/*
* This function removes sios from an IO queue which reside within a given
* range_seg_t and inserts them (in offset order) into a list. Note that
* we only ever return a maximum of 32 sios at once. If there are more sios
* to process within this segment that did not make it onto the list, we
* return B_TRUE; otherwise, B_FALSE.
*/
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
scan_io_t *srch_sio, *sio, *next_sio;
avl_index_t idx;
uint_t num_sios = 0;
int64_t bytes_issued = 0;
ASSERT(rs != NULL);
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
srch_sio = sio_alloc(1);
srch_sio->sio_nr_dvas = 1;
SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
/*
* The exact start of the extent might not contain any matching zios,
* so if that's the case, examine the next one in the tree.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr) && num_sios <= 32) {
ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
queue->q_exts_by_addr));
ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
queue->q_exts_by_addr));
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
bytes_issued += SIO_GET_ASIZE(sio);
num_sios++;
list_insert_tail(list, sio);
sio = next_sio;
}
/*
* We limit the number of sios we process at once to 32 to avoid
* biting off more than we can chew. If we didn't take everything
* in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely.
*/
if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr)) {
range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued);
range_tree_resize_segment(queue->q_exts_by_addr, rs,
SIO_GET_OFFSET(sio), rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
return (B_TRUE);
} else {
uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
queue->q_last_ext_addr = -1;
return (B_FALSE);
}
}
/*
* This is called from the queue emptying thread and selects the next
* extent from which we are to issue I/Os. The behavior of this function
* depends on the state of the scan, the current memory consumption and
* whether or not we are performing a scan shutdown.
* 1) We select extents in an elevator algorithm (LBA-order) if the scan
* needs to perform a checkpoint
* 2) We select the largest available extent if we are up against the
* memory limit.
* 3) Otherwise we don't select any extents.
*/
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
range_tree_t *rt = queue->q_exts_by_addr;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
if (!scn->scn_checkpointing && !scn->scn_clearing)
return (NULL);
/*
* During normal clearing, we want to issue our largest segments
* first, keeping IO as sequential as possible, and leaving the
* smaller extents for later with the hope that they might eventually
* grow to larger sequential segments. However, when the scan is
* checkpointing, no new extents will be added to the sorting queue,
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
zfs_scan_issue_strategy == 1)
return (range_tree_first(rt));
/*
* Try to continue the previous extent if it is not completed yet. After
* shrinking in scan_io_queue_gather() it may no longer be the best, but
* otherwise we would leave a shorter remnant every txg.
*/
uint64_t start;
uint64_t size = 1ULL << rt->rt_shift;
range_seg_t *addr_rs;
if (queue->q_last_ext_addr != -1) {
start = queue->q_last_ext_addr;
addr_rs = range_tree_find(rt, start, size);
if (addr_rs != NULL)
return (addr_rs);
}
/*
* Nothing to continue, so find new best extent.
*/
uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
if (v == NULL)
return (NULL);
queue->q_last_ext_addr = start = *v << rt->rt_shift;
/*
* We need to get the original entry in the by_addr tree so we can
* modify it.
*/
addr_rs = range_tree_find(rt, start, size);
ASSERT3P(addr_rs, !=, NULL);
ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
ASSERT3U(rs_get_end(addr_rs, rt), >, start);
return (addr_rs);
}
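/*
 * Taskq callback that drains one top-level vdev's scan queue: repeatedly
 * fetch the next extent, gather its sios and issue them, until either the
 * queue is empty or we need to suspend for this txg. Any sios left over
 * after a suspension are requeued.
 */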
static void
scan_io_queues_run_one(void *arg)
{
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
range_seg_t *rs;
scan_io_t *sio;
zio_t *zio;
list_t sio_list;
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
mutex_enter(q_lock);
queue->q_zio = zio;
/* Calculate maximum in-flight bytes for this vdev. */
queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
(vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
/* reset per-queue scan statistics for this txg */
queue->q_total_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_total_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
boolean_t more_left;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
do {
scan_io_t *first_sio, *last_sio;
/*
* We have selected which extent needs to be
* processed next. Gather up the corresponding sios.
*/
more_left = scan_io_queue_gather(queue, rs, &sio_list);
ASSERT(!list_is_empty(&sio_list));
first_sio = list_head(&sio_list);
last_sio = list_tail(&sio_list);
seg_end = SIO_GET_END_OFFSET(last_sio);
if (seg_start == 0)
seg_start = SIO_GET_OFFSET(first_sio);
/*
* Issuing sios can take a long time so drop the
* queue lock. The sio queue won't be updated by
* other threads since we're in syncing context so
* we can be sure that our trees will remain exactly
* as we left them.
*/
mutex_exit(q_lock);
suspended = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);
if (suspended)
break;
} while (more_left);
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
if (suspended)
break;
}
/*
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
while ((sio = list_remove_head(&sio_list)) != NULL)
scan_io_queue_insert_impl(queue, sio);
queue->q_zio = NULL;
mutex_exit(q_lock);
zio_nowait(zio);
list_destroy(&sio_list);
}
/*
* Performs an emptying run on all scan queues in the pool. This just
* punches out one thread per top-level vdev, each of which processes
* only that vdev's scan queue. We can parallelize the I/O here because
* we know that each queue's I/Os only affect its own top-level vdev.
*
* This function waits for the queue runs to complete, and must be
* called from dsl_scan_sync (or in general, syncing context).
*/
static void
scan_io_queues_run(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (scn->scn_queues_pending == 0)
return;
if (scn->scn_taskq == NULL) {
int nthreads = spa->spa_root_vdev->vdev_children;
/*
* We need to make this taskq *always* execute as many
* threads in parallel as we have top-level vdevs and no
* less, otherwise strange serialization of the calls to
* scan_io_queues_run_one can occur during spa_sync runs
* and that significantly impacts performance.
*/
scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
mutex_enter(&vd->vdev_scan_io_queue_lock);
if (vd->vdev_scan_io_queue != NULL) {
VERIFY(taskq_dispatch(scn->scn_taskq,
scan_io_queues_run_one, vd->vdev_scan_io_queue,
TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* Wait for the queues to finish issuing their IOs for this run
* before we return. There may still be IOs in flight at this
* point.
*/
taskq_wait(scn->scn_taskq);
}
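/*
 * Decide whether an async destroy pass should yield for this txg, based
 * on how many blocks (and dedup frees) we have already processed, how
 * long we have been running, and whether the pool is shutting down.
 */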
static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
uint64_t elapsed_nanosecs;
if (zfs_recover)
return (B_FALSE);
if (zfs_async_block_max_blocks != 0 &&
scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
return (B_TRUE);
}
if (zfs_max_async_dedup_frees != 0 &&
scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
return (B_TRUE);
}
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
txg_sync_waiting(scn->scn_dp)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_scan_t *scn = arg;
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
dmu_tx_get_txg(tx), bp, 0));
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
scn->scn_visited_this_txg++;
if (BP_GET_DEDUP(bp))
scn->scn_dedup_frees_this_txg++;
return (0);
}
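/*
 * Roll the per-queue segment and zio statistics from every top-level
 * vdev up into pool-wide totals and averages for this txg.
 */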
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t i;
uint64_t seg_size_total = 0, zio_size_total = 0;
uint64_t seg_count_total = 0, zio_count_total = 0;
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
if (queue == NULL)
continue;
seg_size_total += queue->q_total_seg_size_this_txg;
zio_size_total += queue->q_total_zio_size_this_txg;
seg_count_total += queue->q_segs_this_txg;
zio_count_total += queue->q_zios_this_txg;
}
if (seg_count_total == 0 || zio_count_total == 0) {
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_zios_this_txg = 0;
return;
}
scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
scn->scn_segs_this_txg = seg_count_total;
scn->scn_zios_this_txg = zio_count_total;
}
static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (dsl_scan_free_block_cb(arg, bp, tx));
}
static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
dsl_scan_t *scn = arg;
const dva_t *dva = &bp->blk_dva[0];
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
DVA_GET_ASIZE(dva), tx);
scn->scn_visited_this_txg++;
return (0);
}
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t used = 0, comp, uncomp;
boolean_t clones_left;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
(scn->scn_async_destroying && !scn->scn_async_stalled))
return (B_TRUE);
if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
&used, &comp, &uncomp);
}
clones_left = spa_livelist_delete_check(spa);
return ((used != 0) || (clones_left));
}
boolean_t
dsl_errorscrub_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if (dsl_errorscrubbing(scn->scn_dp))
return (B_TRUE);
return (B_FALSE);
}
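/*
 * Recursively check vd's leaves; returns B_TRUE if any concrete leaf
 * still has a non-deferred resilver outstanding, i.e. there is a device
 * that must be resilvered now rather than in a later deferred pass.
 */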
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
boolean_t need_resilver = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++) {
need_resilver |=
dsl_scan_check_deferred(vd->vdev_child[c]);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (need_resilver);
if (!vd->vdev_resilver_deferred)
need_resilver = B_TRUE;
return (need_resilver);
}
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_t *vd;
vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops == &vdev_indirect_ops) {
/*
* The indirect vdev can point to multiple
* vdevs. For simplicity, always create
* the resilver zio_t. zio_vdev_io_start()
* will bypass the child resilver i/o's if
* they are on vdevs that don't have DTL's.
*/
return (B_TRUE);
}
if (DVA_GET_GANG(dva)) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
return (B_TRUE);
}
/*
* Check if the top-level vdev must resilver this offset.
* When the offset does not intersect with a dirty leaf DTL
* then it may be possible to skip the resilver IO. The psize
* is provided instead of asize to simplify the check for RAIDZ.
*/
if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
return (B_FALSE);
/*
* Check that this top-level vdev has a device under it which
* is resilvering and is not deferred.
*/
if (!dsl_scan_check_deferred(vd))
return (B_FALSE);
return (B_TRUE);
}
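/*
 * Process pending async destroys for this txg: free blocks from the
 * free bpobj and the async-destroy bptree, transfer any leaked space
 * to dp_leak_dir when zfs_free_leak_on_eio is set, and drain the
 * obsolete bpobj. Returns nonzero (e.g. ERESTART) if the work must
 * resume in a later txg.
 */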
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
int err = 0;
if (spa_suspend_async_destroy(spa))
return (0);
if (zfs_free_bpobj_enabled &&
spa_version(spa) >= SPA_VERSION_DEADLISTS) {
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bpobj_iterate(&dp->dp_free_bpobj,
bpobj_dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
}
if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
ASSERT(scn->scn_async_destroying);
scn->scn_is_bptree = B_TRUE;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bptree_iterate(dp->dp_meta_objset,
dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err == EIO || err == ECKSUM) {
err = 0;
} else if (err != 0 && err != ERESTART) {
zfs_panic_recover("error %u from "
"traverse_dataset_destroyed()", err);
}
if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
/* finished; deactivate async destroy feature */
spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
ASSERT(!spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY));
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, tx));
VERIFY0(bptree_free(dp->dp_meta_objset,
dp->dp_bptree_obj, tx));
dp->dp_bptree_obj = 0;
scn->scn_async_destroying = B_FALSE;
scn->scn_async_stalled = B_FALSE;
} else {
/*
* If we didn't make progress, mark the async
* destroy as stalled, so that we will not initiate
* a spa_sync() on its behalf. Note that we only
* check this if we are not finished, because if the
* bptree had no blocks for us to visit, we can
* finish without "making progress".
*/
scn->scn_async_stalled =
(scn->scn_visited_this_txg == 0);
}
}
if (scn->scn_visited_this_txg) {
zfs_dbgmsg("freed %llu blocks in %llums from "
"free_bpobj/bptree on %s in txg %llu; err=%u",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)
NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
spa->spa_name, (longlong_t)tx->tx_txg, err);
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
/*
* Write out changes to the DDT and the BRT that may be required
* as a result of the blocks freed. This ensures that the DDT
* and the BRT are clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
brt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
zfs_free_leak_on_eio &&
(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
/*
* We have finished background destroying, but there is still
* some space left in the dp_free_dir. Transfer this leaked
* space to the dp_leak_dir.
*/
if (dp->dp_leak_dir == NULL) {
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
LEAK_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
LEAK_DIR_NAME, &dp->dp_leak_dir));
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
-dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
}
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
!spa_livelist_delete_check(spa)) {
/* finished; verify that space accounting went to zero */
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
spa_notify_waiters(spa);
EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ));
if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_OBSOLETE_COUNTS));
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
err = bpobj_iterate(&dp->dp_obsolete_bpobj,
dsl_scan_obsolete_block_cb, scn, tx);
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
dsl_pool_destroy_obsolete_bpobj(dp, tx);
}
return (0);
}
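/*
 * Parse an error log entry name of the form "objset:object:level:blkid"
 * (for example "21:153:0:0") into a zbookmark_phys_t.
 */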
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
zb->zb_objset = zfs_strtonum(buf, &buf);
ASSERT(*buf == ':');
zb->zb_object = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == '\0');
}
static void
name_to_object(char *buf, uint64_t *obj)
{
*obj = zfs_strtonum(buf, &buf);
ASSERT(*buf == '\0');
}
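/*
 * Issue a scrub read for one error log entry: hold the dataset and dnode
 * named by the bookmark, look up the block pointer at the given level and
 * blkid, and hand it to scan_exec_io(). Returns quietly on holes, lookup
 * failures, and datasets whose encryption keys are not loaded.
 */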
static void
read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
objset_t *os;
if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
return;
if (dmu_objset_from_ds(ds, &os) != 0) {
dsl_dataset_rele(ds, FTAG);
return;
}
/*
* If the key is not loaded dbuf_dnode_findbp() will error out with
* EACCES. However in that case dnode_hold() will eventually call
* dbuf_read()->zio_wait() which may call spa_log_error(). This will
* lead to a deadlock due to us holding the mutex spa_errlist_lock.
* Avoid this by checking here if the keys are loaded, if not return.
* If the keys are not loaded the head_errlog feature is meaningless
* as we cannot figure out the birth txg of the block pointer.
*/
if (dsl_dataset_get_keystatus(ds->ds_dir) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
dsl_dataset_rele(ds, FTAG);
return;
}
dnode_t *dn;
blkptr_t bp;
if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
dsl_dataset_rele(ds, FTAG);
return;
}
rw_enter(&dn->dn_struct_rwlock, RW_READER);
int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
NULL);
if (error) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
return;
}
if (BP_IS_HOLE(&bp)) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
return;
}
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;
/* If it's an intent log block, failure is expected. */
if (zb.zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
ASSERT(!BP_IS_EMBEDDED(&bp));
scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* We keep track of the scrubbed error blocks in "count". This will be used
* when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This
* function is modelled after check_filesystem().
*/
static int
scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep,
int *count)
{
dsl_dataset_t *ds;
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds);
if (error != 0)
return (error);
uint64_t latest_txg;
uint64_t txg_to_consider = spa->spa_syncing_txg;
boolean_t check_snapshot = B_TRUE;
error = find_birth_txg(ds, zep, &latest_txg);
/*
* If find_birth_txg() errors out, then err on the side of caution and
* proceed; in the worst case we scrub all objects. If zep->zb_birth
* is 0 (e.g. in case of encryption with unloaded keys) also proceed to
* scrub all objects.
*/
if (error == 0 && zep->zb_birth == latest_txg) {
/* Block was neither freed nor rewritten. */
zbookmark_phys_t zb;
zep_to_zb(fs, zep, &zb);
scn->scn_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/* We have already acquired the config lock for spa */
read_by_block_level(scn, zb);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined++;
scn->errorscrub_phys.dep_to_examine--;
(*count)++;
if ((*count) == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, &zb)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EFAULT));
}
check_snapshot = B_FALSE;
} else if (error == 0) {
txg_to_consider = latest_txg;
}
/*
* Retrieve the number of snapshots if the dataset is not a snapshot.
*/
uint64_t snap_count = 0;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
error = zap_count(spa->spa_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
}
if (snap_count == 0) {
/* Filesystem without snapshots. */
dsl_dataset_rele(ds, FTAG);
return (0);
}
uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsl_dataset_rele(ds, FTAG);
/* Check only snapshots created from this file system. */
while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
snap_obj_txg <= txg_to_consider) {
error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
if (error != 0)
return (error);
if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsl_dataset_rele(ds, FTAG);
continue;
}
boolean_t affected = B_TRUE;
if (check_snapshot) {
uint64_t blk_txg;
error = find_birth_txg(ds, zep, &blk_txg);
/*
* Scrub the snapshot also when zb_birth == 0 or when
* find_birth_txg() returns an error.
*/
affected = (error == 0 && zep->zb_birth == blk_txg) ||
(error != 0) || (zep->zb_birth == 0);
}
/* Scrub snapshots. */
if (affected) {
zbookmark_phys_t zb;
zep_to_zb(snap_obj, zep, &zb);
scn->scn_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/* We have already acquired the config lock for spa */
read_by_block_level(scn, zb);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined++;
scn->errorscrub_phys.dep_to_examine--;
(*count)++;
if ((*count) == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, &zb)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EFAULT));
}
}
snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_rele(ds, FTAG);
}
return (0);
}
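/*
 * Syncing-context entry point for the error scrub. Walks the persistent
 * error log cursor and re-reads each recorded block (expanding to the
 * affected snapshots when the head_errlog feature is enabled), suspending
 * when zfs_scrub_error_blocks_per_txg is reached.
 */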
void
dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
return;
}
if (dsl_scan_resilvering(scn->scn_dp)) {
/* cancel the error scrub if resilver started */
dsl_scan_cancel(scn->scn_dp);
return;
}
spa->spa_scrub_active = B_TRUE;
scn->scn_sync_start_time = gethrtime();
/*
* zfs_scan_suspend_progress can be set to disable scrub progress.
* See more detailed comment in dsl_scan_sync().
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
int mintime = zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
int i = 0;
zap_attribute_t *za;
zbookmark_phys_t *zb;
boolean_t limit_exceeded = B_FALSE;
za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
zap_cursor_advance(&scn->errorscrub_cursor)) {
name_to_bookmark(za->za_name, zb);
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
dsl_pool_config_enter(dp, FTAG);
read_by_block_level(scn, *zb);
dsl_pool_config_exit(dp, FTAG);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined += 1;
scn->errorscrub_phys.dep_to_examine -= 1;
i++;
if (i == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, zb)) {
limit_exceeded = B_TRUE;
break;
}
}
if (!limit_exceeded)
dsl_errorscrub_done(scn, B_TRUE, tx);
dsl_errorscrub_sync_state(scn, tx);
kmem_free(za, sizeof (*za));
kmem_free(zb, sizeof (*zb));
return;
}
int error = 0;
for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
zap_cursor_advance(&scn->errorscrub_cursor)) {
zap_cursor_t *head_ds_cursor;
zap_attribute_t *head_ds_attr;
zbookmark_err_phys_t head_ds_block;
head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
uint64_t head_ds_err_obj = za->za_first_integer;
uint64_t head_ds;
name_to_object(za->za_name, &head_ds);
boolean_t config_held = B_FALSE;
uint64_t top_affected_fs;
for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {
name_to_errphys(head_ds_attr->za_name, &head_ds_block);
/*
* In case we are called from spa_sync the pool
* config is already held.
*/
if (!dsl_pool_config_held(dp)) {
dsl_pool_config_enter(dp, FTAG);
config_held = B_TRUE;
}
error = find_top_affected_fs(spa,
head_ds, &head_ds_block, &top_affected_fs);
if (error)
break;
error = scrub_filesystem(spa, top_affected_fs,
&head_ds_block, &i);
if (error == SET_ERROR(EFAULT)) {
limit_exceeded = B_TRUE;
break;
}
}
zap_cursor_fini(head_ds_cursor);
kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
kmem_free(head_ds_attr, sizeof (*head_ds_attr));
if (config_held)
dsl_pool_config_exit(dp, FTAG);
}
kmem_free(za, sizeof (*za));
kmem_free(zb, sizeof (*zb));
if (!limit_exceeded)
dsl_errorscrub_done(scn, B_TRUE, tx);
dsl_errorscrub_sync_state(scn, tx);
}
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
* can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
*/
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
int err = 0;
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
state_sync_type_t sync_type = SYNC_OPTIONAL;
if (spa->spa_resilver_deferred &&
!spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
* imported (see dsl_scan_init). We also restart scans if there
* is a deferred resilver and the user has manually disabled
* deferred resilvers via the tunable.
*/
if (dsl_scan_restarting(scn, tx) ||
(spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
dsl_scan_setup_sync(&func, tx);
}
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
/*
* If the scan is inactive due to a stalled async destroy, try again.
*/
if (!scn->scn_async_stalled && !dsl_scan_active(scn))
return;
/* reset scan statistics */
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
scn->scn_holes_this_txg = 0;
scn->scn_lt_min_this_txg = 0;
scn->scn_gt_max_this_txg = 0;
scn->scn_ddt_contained_this_txg = 0;
scn->scn_objsets_visited_this_txg = 0;
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_zios_this_txg = 0;
scn->scn_suspending = B_FALSE;
scn->scn_sync_start_time = gethrtime();
spa->spa_scrub_active = B_TRUE;
/*
* First process the async destroys. If we suspend, don't do
* any scrubbing or resilvering. This ensures that there are no
* async destroys while we are scanning, so the scan code doesn't
* have to worry about traversing it. It is also faster to free the
* blocks than to scrub them.
*/
err = dsl_process_async_destroys(dp, tx);
if (err != 0)
return;
if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
return;
/*
* Wait a few txgs after importing to begin scanning so that
* we can get the pool imported quickly.
*/
if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
return;
/*
* zfs_scan_suspend_progress can be set to disable scan progress.
* We don't want to spin the txg_sync thread, so we add a delay
* here to simulate the time spent doing a scan. This is mostly
* useful for testing and debugging.
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
uint_t mintime = (scn->scn_phys.scn_func ==
POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
/*
* Disabled by default, set zfs_scan_report_txgs to report
* average performance over the last zfs_scan_report_txgs TXGs.
*/
if (zfs_scan_report_txgs != 0 &&
tx->tx_txg % zfs_scan_report_txgs == 0) {
scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
spa_scan_stat_init(spa);
}
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
*/
if (!zfs_scan_legacy) {
scn->scn_is_sorted = B_TRUE;
if (scn->scn_last_checkpoint == 0)
scn->scn_last_checkpoint = ddi_get_lbolt();
}
/*
* For sorted scans, determine what kind of work we will be doing
* this txg based on our memory limitations and whether or not we
* need to perform a checkpoint.
*/
if (scn->scn_is_sorted) {
/*
* If we are over our checkpoint interval, set scn_clearing
* so that we can begin checkpointing immediately. The
* checkpoint allows us to save a consistent bookmark
* representing how much data we have scrubbed so far.
* Otherwise, use the memory limit to determine if we should
* scan for metadata or start issuing scrub IOs. We accumulate
* metadata until we hit our hard memory limit at which point
* we issue scrub IOs until we are at our soft memory limit.
*/
if (scn->scn_checkpointing ||
ddi_get_lbolt() - scn->scn_last_checkpoint >
SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
if (!scn->scn_checkpointing)
zfs_dbgmsg("begin scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
} else {
boolean_t should_clear = dsl_scan_should_clear(scn);
if (should_clear && !scn->scn_clearing) {
zfs_dbgmsg("begin scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_TRUE;
} else if (!should_clear && scn->scn_clearing) {
zfs_dbgmsg("finish scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_FALSE;
}
}
} else {
ASSERT0(scn->scn_checkpointing);
ASSERT0(scn->scn_clearing);
}
if (!scn->scn_clearing && scn->scn_done_txg == 0) {
/* Need to scan metadata for more blocks to scrub */
dsl_scan_phys_t *scnp = &scn->scn_phys;
taskqid_t prefetch_tqid;
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
* Limits for the issuing phase are done per top-level vdev and
* are handled separately.
*/
scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"ddt bm=%llu/%llu/%llu/%llx",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
} else {
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"bm=%llu/%llu/%llu/%llu",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_bookmark.zb_objset,
(longlong_t)scnp->scn_bookmark.zb_object,
(longlong_t)scnp->scn_bookmark.zb_level,
(longlong_t)scnp->scn_bookmark.zb_blkid);
}
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scn->scn_prefetch_stop = B_FALSE;
prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
dsl_scan_prefetch_thread, scn, TQ_SLEEP);
ASSERT(prefetch_tqid != TASKQID_INVALID);
dsl_pool_config_enter(dp, FTAG);
dsl_scan_visit(scn, tx);
dsl_pool_config_exit(dp, FTAG);
mutex_enter(&dp->dp_spa->spa_scrub_lock);
scn->scn_prefetch_stop = B_TRUE;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&dp->dp_spa->spa_scrub_lock);
taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
"(%llu os's, %llu holes, %llu < mintxg, "
"%llu in ddt, %llu > maxtxg)",
(longlong_t)scn->scn_visited_this_txg,
spa->spa_name,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_objsets_visited_this_txg,
(longlong_t)scn->scn_holes_this_txg,
(longlong_t)scn->scn_lt_min_this_txg,
(longlong_t)scn->scn_ddt_contained_this_txg,
(longlong_t)scn->scn_gt_max_this_txg);
if (!scn->scn_suspending) {
ASSERT0(avl_numnodes(&scn->scn_queue));
scn->scn_done_txg = tx->tx_txg + 1;
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
scn->scn_issued_before_pass +=
spa->spa_scan_pass_issued;
spa_scan_stat_init(spa);
}
zfs_dbgmsg("scan complete for %s txg %llu",
spa->spa_name,
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
ASSERT(scn->scn_clearing);
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scan_io_queues_run(scn);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
/* calculate and dprintf the current memory usage */
(void) dsl_scan_should_clear(scn);
dsl_scan_update_stats(scn);
zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
"in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
(longlong_t)scn->scn_zios_this_txg,
spa->spa_name,
(longlong_t)scn->scn_segs_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_avg_zio_size_this_txg,
(longlong_t)scn->scn_avg_seg_size_this_txg);
} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
/* Finished with everything. Mark the scrub as complete */
zfs_dbgmsg("scan issuing complete txg %llu for %s",
(longlong_t)tx->tx_txg,
spa->spa_name);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
ASSERT0(scn->scn_queues_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
dsl_scan_sync_state(scn, tx, sync_type);
}
static void
count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
/*
* Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block.
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Update the spa's stats on how many bytes we have issued.
* Sequential scrubs create a zio for each DVA of the bp. Each
* of these will include all DVAs for repair purposes, but the
* zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs.
*/
atomic_add_64(&spa->spa_scan_pass_issued,
all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
static void
count_block_skipped(dsl_scan_t *scn, const blkptr_t *bp, boolean_t all)
{
if (BP_IS_EMBEDDED(bp))
return;
atomic_add_64(&scn->scn_phys.scn_skipped,
all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
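/*
 * Update the block statistics table. Each block is accounted four times:
 * under (its level, its type), (its level, DMU_OT_TOTAL),
 * (DN_MAX_LEVELS, its type) and (DN_MAX_LEVELS, DMU_OT_TOTAL), so that
 * per-level, per-type and overall totals can all be read directly out
 * of zab_type[][].
 */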
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
if (zab == NULL)
return;
for (int i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_2_of_2_samevdev++;
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal == 1)
zb->zb_ditto_2_of_3_samevdev++;
else if (equal == 3)
zb->zb_ditto_3_of_3_samevdev++;
break;
}
}
}
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
atomic_add_64(&scn->scn_queues_pending, 1);
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
sio_free(sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio);
range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
SIO_GET_ASIZE(sio));
}
/*
* Given all the info we got from our metadata scanning process, we
* construct a scan_io_t and insert it into the scan sorting queue. The
* I/O must already be suitable for us to process. This is controlled
* by dsl_scan_enqueue().
*/
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
ASSERT0(BP_IS_GANG(bp));
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
bp2sio(bp, sio, dva_i);
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
queue->q_last_ext_addr = -1;
scan_io_queue_insert_impl(queue, sio);
}
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
* or immediately executes the I/O.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb)
{
spa_t *spa = dp->dp_spa;
ASSERT(!BP_IS_EMBEDDED(bp));
/*
* Gang blocks are hard to issue sequentially, so we just issue them
* here immediately instead of queuing them.
*/
if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
scan_exec_io(dp, bp, zio_flags, zb, NULL);
return;
}
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
dva_t dva;
vdev_t *vdev;
dva = bp->blk_dva[i];
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
ASSERT(vdev != NULL);
mutex_enter(&vdev->vdev_scan_io_queue_lock);
if (vdev->vdev_scan_io_queue == NULL)
vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
ASSERT(dp->dp_scan != NULL);
scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
i, zio_flags, zb);
mutex_exit(&vdev->vdev_scan_io_queue_lock);
}
}
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
size_t psize = BP_GET_PSIZE(bp);
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
count_block(dp->dp_blkstats, bp);
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) {
count_block_skipped(scn, bp, B_TRUE);
return (0);
}
/* Embedded BP's have phys_birth==0, so we reject them above. */
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
zio_flags |= ZIO_FLAG_SCRUB;
needs_io = B_TRUE;
} else {
ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
zio_flags |= ZIO_FLAG_RESILVER;
needs_io = B_FALSE;
}
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d];
/*
* Keep track of how much data we've examined so that
* zpool(8) status can make useful progress reports.
*/
uint64_t asize = DVA_GET_ASIZE(dva);
scn->scn_phys.scn_examined += asize;
spa->spa_scan_pass_exam += asize;
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
needs_io = dsl_scan_need_resilver(spa, dva, psize,
phys_birth);
}
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
count_block_skipped(scn, bp, B_TRUE);
}
/* do not relocate this block */
return (0);
}
static void
dsl_scan_scrub_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
dsl_scan_io_queue_t *queue = zio->io_private;
abd_free(zio->io_abd);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
} else {
mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&queue->q_zio_cv);
mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
}
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
!dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan
->errorscrub_phys.dep_errors);
} else {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
.scn_errors);
}
}
}
/*
 * Given a scanning zio's information, executes the zio. The zio need
 * not necessarily be sortable; this function simply executes the zio,
 * no matter what it is. The optional queue argument allows the caller
 * to request per-top-level-vdev I/O rate limiting instead of the
 * legacy global limiting.
*/
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
zio_t *pio;
if (queue == NULL) {
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
pio = scn->scn_zio_root;
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
ASSERT3U(queue->q_maxinflight_bytes, >, 0);
mutex_enter(q_lock);
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
pio = queue->q_zio;
mutex_exit(q_lock);
}
ASSERT(pio != NULL);
count_block_issued(spa, bp, queue == NULL);
zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
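/*
 * Illustrative sketch: a standalone userland rendering (hypothetical names,
 * not part of this file or its build; assumes POSIX threads where the code
 * above uses kmutex/kcondvar) of the inflight-byte throttle pattern in
 * scan_exec_io(): issuers block on a condition variable until completions,
 * signalled from the done callback, bring the inflight byte count back
 * under the cap (cf. scn_maxinflight_bytes / q_maxinflight_bytes).
 */
#include <pthread.h>
#include <stdint.h>

typedef struct throttle {
	pthread_mutex_t t_lock;
	pthread_cond_t t_cv;
	uint64_t t_inflight;	/* bytes currently issued */
	uint64_t t_max;		/* cap on inflight bytes */
} throttle_t;

static void
throttle_issue(throttle_t *t, uint64_t size)
{
	pthread_mutex_lock(&t->t_lock);
	while (t->t_inflight >= t->t_max)	/* wait for completions */
		pthread_cond_wait(&t->t_cv, &t->t_lock);
	t->t_inflight += size;
	pthread_mutex_unlock(&t->t_lock);
}

static void
throttle_done(throttle_t *t, uint64_t size)	/* cf. dsl_scan_scrub_done() */
{
	pthread_mutex_lock(&t->t_lock);
	t->t_inflight -= size;
	pthread_cond_broadcast(&t->t_cv);	/* wake all blocked issuers */
	pthread_mutex_unlock(&t->t_lock);
}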
/*
* This is the primary extent sorting algorithm. We balance two parameters:
* 1) how many bytes of I/O are in an extent
* 2) how well the extent is filled with I/O (as a fraction of its total size)
* Since we allow extents to have gaps between their constituent I/Os, it's
 * possible to have a fairly large extent that contains the same amount of
 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
* The algorithm sorts based on a score calculated from the extent's size,
* the relative fill volume (in %) and a "fill weight" parameter that controls
* the split between whether we prefer larger extents or more well populated
* extents:
*
* SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
*
* Example:
* 1) assume extsz = 64 MiB
* 2) assume fill = 32 MiB (extent is half full)
* 3) assume fill_weight = 3
* 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
* SCORE = 32M + (50 * 3 * 32M) / 100
* SCORE = 32M + (4800M / 100)
* SCORE = 32M + 48M
* ^ ^
* | +--- final total relative fill-based score
* +--------- final total fill-based score
* SCORE = 80M
*
 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) versus merely
 * larger ones. Note that as an optimization, we replace multiplication and
 * division by 100 with bitshifting by 7 (which effectively multiplies and
 * divides by 128).
 *
 * Since we do not care whether one extent is only a few percent better than
 * another, we compress the score into 6 bits via a binary logarithm
 * (highbit64()) and store it in the high bits of the offset, which are
 * otherwise unused due to ashift. This reduces the q_exts_by_size B-tree
 * elements to only 64 bits, so they can be compared with a single operation.
 * It also makes scrubs more sequential and reduces the chance that a minor
 * change to an extent moves it within the B-tree.
*/
__attribute__((always_inline)) inline
static int
ext_size_compare(const void *x, const void *y)
{
const uint64_t *a = x, *b = y;
return (TREE_CMP(*a, *b));
}
ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
ext_size_compare)
static void
ext_size_create(range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf,
sizeof (uint64_t));
}
static void
ext_size_destroy(range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
ASSERT0(zfs_btree_numnodes(size_tree));
zfs_btree_destroy(size_tree);
}
static uint64_t
ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
{
(void) rt;
uint64_t size = rsg->rs_end - rsg->rs_start;
uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
fill_weight * rsg->rs_fill) >> 7);
ASSERT3U(rt->rt_shift, >=, 8);
return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
}
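/*
 * Illustrative sketch: a standalone userland program (not part of this
 * file's build) checking the worked example from the sorting comment above.
 * It computes the percent-based reference formula and the shift-by-7
 * variant used in ext_size_value(), assuming fill_weight = 3 as in the
 * example.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t size = 64ULL << 20;	/* 64 MiB extent */
	uint64_t fill = 32ULL << 20;	/* 32 MiB of queued I/O */
	uint64_t fill_weight = 3;
	/* Reference formula from the comment, in percent. */
	uint64_t pct = fill + ((fill * 100 / size) * fill_weight * fill) / 100;
	/* Optimized variant: multiply and divide by 128 via shifts. */
	uint64_t shifted = fill + ((((fill << 7) / size) *
	    fill_weight * fill) >> 7);
	/* Both print 83886080, i.e. the 80 MiB total from the example. */
	printf("pct=%llu shifted=%llu\n", (unsigned long long)pct,
	    (unsigned long long)shifted);
	return (0);
}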
static void
ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_add(size_tree, &v);
}
static void
ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_remove(size_tree, &v);
}
static void
ext_size_vacate(range_tree_t *rt, void *arg)
{
zfs_btree_t *size_tree = arg;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
ext_size_create(rt, arg);
}
static const range_tree_ops_t ext_size_ops = {
.rtop_create = ext_size_create,
.rtop_destroy = ext_size_destroy,
.rtop_add = ext_size_add,
.rtop_remove = ext_size_remove,
.rtop_vacate = ext_size_vacate
};
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
*/
static int
sio_addr_compare(const void *x, const void *y)
{
const scan_io_t *a = x, *b = y;
return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
q->q_scn = scn;
q->q_vd = vd;
q->q_sio_memused = 0;
q->q_last_ext_addr = -1;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
&q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
return (q);
}
/*
* Destroys a scan queue and all segments and scan_io_t's contained in it.
* No further execution of I/O occurs, anything pending in the queue is
* simply freed without being executed.
*/
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (!avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio);
}
ASSERT0(queue->q_sio_memused);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv);
kmem_free(queue, sizeof (*queue));
}
/*
 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
* called on behalf of vdev_top_transfer when creating or destroying
* a mirror vdev due to zpool attach/detach.
*/
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
tvd->vdev_scan_io_queue->q_vd = tvd;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
mutex_enter(&tvd->vdev_scan_io_queue_lock);
if (tvd->vdev_scan_io_queue != NULL)
dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = NULL;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
vdev_t *vdev;
kmutex_t *q_lock;
dsl_scan_io_queue_t *queue;
scan_io_t *srch_sio, *sio;
avl_index_t idx;
uint64_t start, size;
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
ASSERT(vdev != NULL);
q_lock = &vdev->vdev_scan_io_queue_lock;
queue = vdev->vdev_scan_io_queue;
mutex_enter(q_lock);
if (queue == NULL) {
mutex_exit(q_lock);
return;
}
srch_sio = sio_alloc(BP_GET_NDVAS(bp));
bp2sio(bp, srch_sio, dva_i);
start = SIO_GET_OFFSET(srch_sio);
size = SIO_GET_ASIZE(srch_sio);
/*
* We can find the zio in two states:
* 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement
* its data volume from the containing range_seg_t and
* resort the q_exts_by_size tree to reflect that the
* range_seg_t has lost some of its 'fill'. We don't shorten
* the range_seg_t - this is usually rare enough not to be
 * worth the extra hassle of trying to keep track of precise
* extent boundaries.
* 2) Hot, where the zio is currently in-flight in
* dsl_scan_issue_ios. In this case, we can't simply
* reach in and stop the in-flight zio's, so we instead
* block the caller. Eventually, dsl_scan_issue_ios will
* be done with issuing the zio's it gathered and will
* signal us.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio != NULL) {
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/* count the block as though we skipped it */
sio2bp(sio, &tmpbp);
count_block_skipped(scn, &tmpbp, B_FALSE);
sio_free(sio);
}
mutex_exit(q_lock);
}
/*
 * Callback invoked when a zio_free() zio is executing. This needs to be
 * intercepted to prevent a portion of disk space from being deallocated and
 * then reallocated and written to while we still have it queued up for
 * processing.
*/
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(scn != NULL);
if (!dsl_scan_is_running(scn))
return;
for (int i = 0; i < BP_GET_NDVAS(bp); i++)
dsl_scan_freed_dva(spa, bp, i);
}
/*
 * Check whether a vdev needs resilvering (non-empty DTL); if so and a
 * resilver has not started, start one. Otherwise, only restart if the max
 * txg in the DTL range is greater than the max txg in the current scan. If
 * the DTL max is less than the scan max, then the vdev has not missed any
 * new data since the resilver started, so a restart is not needed.
*/
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
uint64_t min, max;
if (!vdev_resilver_needed(vd, &min, &max))
return;
if (!dsl_scan_resilvering(dp)) {
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
return;
}
if (max <= dp->dp_scan->scn_phys.scn_max_txg)
return;
/* restart is needed, check if it can be deferred */
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
vdev_defer_resilver(vd);
else
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to scrub per txg");
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to obsolete per txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to free per txg");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
"Set to prevent scans from progressing");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
"Set to disable scrub I/O");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
"Max number of blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj");
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
"Enable block statistics calculation during scrub");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
"Tunable to report resilver performance over the last N txgs");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
"Error blocks to be scrubbed in one txg");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 176247d63b76..20dc934593f1 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -1,6287 +1,6234 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>
#define WITH_DF_BLOCK_ALLOCATOR
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to each disk before
* moving on to the next top-level vdev.
*/
static uint64_t metaslab_aliquot = 1024 * 1024;
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
+/*
+ * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
+ */
+uint_t metaslab_force_ganging_pct = 3;
+
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
uint_t zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
* blocks.
*/
static const int zfs_metaslab_condense_block_threshold = 4;
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold, the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold, all
 * groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
static uint_t zfs_mg_noalloc_threshold = 0;
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or
* equal to zfs_mg_fragmentation_threshold. If a metaslab group
* exceeds this threshold then it will be skipped unless all metaslab
* groups within the metaslab class have also crossed this threshold.
*
* This tunable was introduced to avoid edge cases where we continue
* allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
* pool are uniformly approaching the threshold, the threshold can
* be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it over the threshold, while freeing segments from disk B
 * brings its fragmentation below the threshold).
*
* Empirically, we've seen that our vdev selection for allocations is
* good enough that fragmentation increases uniformly across all vdevs
* the majority of the time. Thus we set the threshold percentage high
* enough to avoid hitting the speed bump on pools that are being pushed
* to the edge.
*/
static uint_t zfs_mg_fragmentation_threshold = 95;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
 * status, allowing better metaslabs to be selected.
*/
static uint_t zfs_metaslab_fragmentation_threshold = 70;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = B_FALSE;
/*
* When set will prevent metaslabs from being unloaded.
*/
static int metaslab_debug_unload = B_FALSE;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size, it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
uint_t metaslab_df_free_pct = 4;
/*
* Maximum distance to search forward from the last offset. Without this
* limit, fragmented pools can see >100,000 iterations and
* metaslab_block_picker() becomes the performance limiting factor on
* high-performance storage.
*
* With the default setting of 16MB, we typically see less than 500
* iterations, even with very fragmented, ashift=9 pools. The maximum number
* of iterations possible is:
* metaslab_df_max_search / (2 * (1<<ashift))
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
* 2048 (with ashift=12).
*/
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
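/*
 * Illustrative sketch: a standalone program (not part of the build)
 * reproducing the worst-case iteration counts quoted in the comment above
 * from the formula metaslab_df_max_search / (2 * (1 << ashift)).
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t max_search = 16ULL * 1024 * 1024;	/* 16 MiB default */
	for (int ashift = 9; ashift <= 12; ashift += 3) {
		/* Each candidate segment spans at least 2 * (1 << ashift). */
		printf("ashift=%d: %llu iterations\n", ashift,
		    (unsigned long long)(max_search / (2ULL << ashift)));
	}
	/* Prints 16384 iterations for ashift=9 and 2048 for ashift=12. */
	return (0);
}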
/*
* Forces the metaslab_block_picker function to search for at least this many
* segments forwards until giving up on finding a segment that the allocation
* will fit into.
*/
static const uint32_t metaslab_min_search_count = 100;
/*
* If we are not searching forward (due to metaslab_df_max_search,
* metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
* controls what segment is used. If it is set, we will use the largest free
* segment. If it is not set, we will use a segment of exactly the requested
* size (or larger).
*/
static int metaslab_df_use_largest_segment = B_FALSE;
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
int metaslab_load_pct = 50;
/*
* These tunables control how long a metaslab will remain loaded after the
* last allocation from it. A metaslab can't be unloaded until at least
* metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
* have elapsed. However, zfs_metaslab_mem_limit may cause it to be
* unloaded sooner. These settings are intended to be generous -- to keep
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
*/
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
/*
* Max number of metaslabs per group to preload.
*/
uint_t metaslab_preload_limit = 10;
/*
 * Enable/disable preloading of metaslabs.
*/
static int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
static int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
static int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
static int metaslab_bias_enabled = B_TRUE;
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
* Enable/disable segment-based metaslab selection.
*/
static int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
static int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
static const boolean_t metaslab_trace_enabled = B_FALSE;
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
*/
static const uint64_t metaslab_trace_max_entries = 5000;
/*
* Maximum number of metaslabs per group that can be disabled
* simultaneously.
*/
static const int max_disabled_ms = 3;
/*
* Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
* To avoid 64-bit overflow, don't set above UINT32_MAX.
*/
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
/*
* Maximum percentage of memory to use on storing loaded metaslabs. If loading
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
static uint_t zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
* segments. Used for debugging purposes.
*/
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
/*
* By default we only store segments over a certain size in the size-sorted
* metaslab trees (ms_allocatable_by_size and
* ms_unflushed_frees_by_size). This dramatically reduces memory usage and
* improves load and unload times at the cost of causing us to use slightly
* larger segments than we would otherwise in some cases.
*/
static const uint32_t metaslab_by_size_min_shift = 14;
/*
* If not set, we will first try normal allocation. If that fails then
* we will do a gang allocation. If that fails then we will do a "try hard"
* gang allocation. If that fails then we will have a multi-layer gang
* block.
*
* If set, we will first try normal allocation. If that fails then
* we will do a "try hard" allocation. If that fails we will do a gang
* allocation. If that fails we will do a "try hard" gang allocation. If
* that fails then we will have a multi-layer gang block.
*/
static int zfs_metaslab_try_hard_before_gang = B_FALSE;
/*
* When not trying hard, we only consider the best zfs_metaslab_find_max_tries
* metaslabs. This improves performance, especially when there are many
* metaslabs per vdev and the allocation can't actually be satisfied (so we
* would otherwise iterate all the metaslabs). If there is a metaslab with a
* worse weight but it can actually satisfy the allocation, we won't find it
* until trying hard. This may happen if the worse metaslab is not loaded
* (and the true weight is better than we have calculated), or due to weight
* bucketization. E.g. we are looking for a 60K segment, and the best
* metaslabs all have free segments in the 32-63K bucket, but the best
* zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
* bucket, and therefore a lower weight).
*/
static uint_t zfs_metaslab_find_max_tries = 100;
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;
typedef struct metaslab_stats {
kstat_named_t metaslabstat_trace_over_limit;
kstat_named_t metaslabstat_reload_tree;
kstat_named_t metaslabstat_too_many_tries;
kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;
static metaslab_stats_t metaslab_stats = {
{ "trace_over_limit", KSTAT_DATA_UINT64 },
{ "reload_tree", KSTAT_DATA_UINT64 },
{ "too_many_tries", KSTAT_DATA_UINT64 },
{ "try_hard", KSTAT_DATA_UINT64 },
};
#define METASLABSTAT_BUMP(stat) \
atomic_inc_64(&metaslab_stats.stat.value.ui64);
static kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
"misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (metaslab_ksp != NULL) {
metaslab_ksp->ks_data = &metaslab_stats;
kstat_install(metaslab_ksp);
}
}
void
metaslab_stat_fini(void)
{
if (metaslab_ksp != NULL) {
kstat_delete(metaslab_ksp);
metaslab_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
mca->mca_rotor = NULL;
zfs_refcount_create_tracked(&mca->mca_alloc_slots);
}
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
ASSERT(mca->mca_rotor == NULL);
zfs_refcount_destroy(&mca->mca_alloc_slots);
}
mutex_destroy(&mc->mc_lock);
multilist_destroy(&mc->mc_metaslab_txg_list);
kmem_free(mc, offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
return (0);
}
static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = vdev_get_mg(tvd, mc);
/*
* Skip any holes, uninitialized top-levels, or
 * vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels,
 * or vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
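/*
 * Illustrative sketch: a standalone program (hypothetical numbers, not part
 * of the build) of the space-weighted average that
 * metaslab_class_fragmentation() computes above.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Two top-level vdevs: 1 TiB at 10% fragmented, 3 TiB at 50%. */
	uint64_t space[2] = { 1ULL << 40, 3ULL << 40 };
	uint64_t frag[2] = { 10, 50 };
	uint64_t weighted = 0, total = 0;
	for (int c = 0; c < 2; c++) {
		weighted += frag[c] * space[c];
		total += space[c];
	}
	/* (10 * 1 + 50 * 3) / 4 = 40; prints "fragmentation = 40%". */
	printf("fragmentation = %llu%%\n",
	    (unsigned long long)(weighted / total));
	return (0);
}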
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
multilist_t *ml = &mc->mc_metaslab_txg_list;
for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL) {
mutex_enter(&msp->ms_lock);
/*
* If the metaslab has been removed from the list
* (which could happen if we were at the memory limit
* and it was evicted during this loop), then we can't
* proceed and we should restart the sublist.
*/
if (!multilist_link_active(&msp->ms_class_txg_node)) {
mutex_exit(&msp->ms_lock);
i--;
break;
}
mls = multilist_sublist_lock(ml, i);
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
if (txg >
msp->ms_selected_txg + metaslab_unload_delay &&
gethrtime() > msp->ms_selected_time +
(uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
metaslab_evict(msp, txg);
} else {
/*
* Once we've hit a metaslab selected too
* recently to evict, we're done evicting for
* now.
*/
mutex_exit(&msp->ms_lock);
break;
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
}
}
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int sort1 = 0;
int sort2 = 0;
if (m1->ms_allocator != -1 && m1->ms_primary)
sort1 = 1;
else if (m1->ms_allocator != -1 && !m1->ms_primary)
sort1 = 2;
if (m2->ms_allocator != -1 && m2->ms_primary)
sort2 = 1;
else if (m2->ms_allocator != -1 && !m2->ms_primary)
sort2 = 2;
/*
* Sort inactive metaslabs first, then primaries, then secondaries. When
* selecting a metaslab to allocate from, an allocator first tries its
* primary, then secondary active metaslab. If it doesn't have active
* metaslabs, or can't allocate from them, it searches for an inactive
* metaslab to activate. If it can't find a suitable one, it will steal
* a primary or secondary metaslab from another allocator.
*/
if (sort1 < sort2)
return (-1);
if (sort1 > sort2)
return (1);
int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (TREE_CMP(m1->ms_start, m2->ms_start));
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity is
 * above the zfs_mg_noalloc_threshold and its fragmentation value does not
 * exceed zfs_mg_fragmentation_threshold. If a metaslab group
* transitions from allocatable to non-allocatable or vice versa then the
* metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
* A metaslab group is considered allocatable if it has plenty
* of free space or is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
const metaslab_t *a = va;
const metaslab_t *b = vb;
int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
if (likely(cmp))
return (cmp);
uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
cmp = TREE_CMP(a_vdev_id, b_vdev_id);
if (cmp)
return (cmp);
return (TREE_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
metaslab_group_t *mg;
mg = kmem_zalloc(offsetof(metaslab_group_t,
mg_allocator[allocators]), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;
for (int i = 0; i < allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
}
mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
taskq_destroy(mg->mg_taskq);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
mutex_destroy(&mg->mg_ms_disabled_lock);
cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
}
kmem_free(mg, offsetof(metaslab_group_t,
mg_allocator[mg->mg_allocators]));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1,
vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
mc->mc_allocator[i].mca_rotor = mg;
mg = mg->mg_next;
}
}
/*
* Passivate a metaslab group and remove it from the allocation rotor.
* Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
* a metaslab group. This function will momentarily drop spa_config_locks
* that are lower than the SCL_ALLOC lock (see comment below).
*/
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
(SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
/*
* The spa_config_lock is an array of rwlocks, ordered as
* follows (from highest to lowest):
* SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
* SCL_ZIO > SCL_FREE > SCL_VDEV
* (For more information about the spa_config_lock see spa_misc.c)
* The higher the lock, the broader its coverage. When we passivate
* a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
* config locks. However, the metaslab group's taskq might be trying
* to preload metaslabs so we must drop the SCL_ZIO lock and any
* lower locks to allow the I/O to complete. At a minimum,
* we continue to hold the SCL_ALLOC lock, which prevents any future
* allocations from taking place and any changes to the vdev tree.
*/
spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(mg->mg_taskq, 0);
spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
metaslab_t *msp = mga->mga_primary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
msp = mga->mga_secondary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
}
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mgnext = NULL;
} else {
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
if (mc->mc_allocator[i].mca_rotor == mg)
mc->mc_allocator[i].mca_rotor = mgnext;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
/*
* Note that the number of nodes in mg_metaslab_tree may be one less
* than vdev_ms_count, due to the embedded log metaslab.
*/
mutex_enter(&mg->mg_lock);
uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
mutex_exit(&mg->mg_lock);
return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
avl_tree_t *t = &mg->mg_metaslab_tree;
uint64_t ashift = mg->mg_vd->vdev_ashift;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
for (metaslab_t *msp = avl_first(t);
msp != NULL; msp = AVL_NEXT(t, msp)) {
VERIFY3P(msp->ms_group, ==, mg);
/* skip if not active */
if (msp->ms_sm == NULL)
continue;
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(MUTEX_HELD(&mg->mg_lock));
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
/*
* Calculate the fragmentation for a given metaslab group. We can use
* a simple average here since all metaslabs within the group must have
* the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
* group have a fragmentation metric.
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
uint64_t valid_ms = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
continue;
if (msp->ms_group != mg)
continue;
valid_ms++;
fragmentation += msp->ms_fragmentation;
}
if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
return (ZFS_FRAG_INVALID);
fragmentation /= valid_ms;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
* zfs_mg_fragmentation_threshold and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
int flags, uint64_t psize, int allocator, int d)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
* We can only consider skipping this metaslab group if it's
* in the normal metaslab class and there are other metaslab
* groups to select from. Otherwise, we always consider it eligible
* for allocations.
*/
if ((mc != spa_normal_class(spa) &&
mc != spa_special_class(spa) &&
mc != spa_dedup_class(spa)) ||
mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mga_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
int64_t qdepth;
uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
/*
* Some allocations (e.g., those coming from device removal
 * where the allocations are not even counted in the
 * metaslab allocation queues) are allowed to bypass
* the throttle.
*/
if (flags & METASLAB_DONT_THROTTLE)
return (B_TRUE);
/*
* Relax allocation throttling for ditto blocks. Due to
* random imbalances in allocation it tends to push copies
 * to the one vdev that looks a bit better at the moment.
*/
qmax = qmax * (4 + d) / 4;
qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
- * the only allocatable metasable group, then attempt
+ * the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (metaslab_group_t *mgp = mg->mg_next;
mgp != rotor; mgp = mgp->mg_next) {
metaslab_group_allocator_t *mgap =
&mgp->mg_allocator[allocator];
qmax = mgap->mga_cur_max_alloc_queue_depth;
qmax = qmax * (4 + d) / 4;
qdepth =
zfs_refcount_count(&mgap->mga_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree using 32-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
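/*
 * A note on the branchless tie-break above: TREE_CMP() returns -1, 0,
 * or 1. When the sizes differ, cmp is nonzero, !cmp is 0, and the
 * start comparison is multiplied away; when the sizes are equal, cmp
 * is 0 and the expression reduces to TREE_CMP(rs_start1, rs_start2).
 * E.g. two equal-sized segments starting at 100 and 200 compare as
 * 0 + 1 * -1 = -1.
 */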
/*
* Comparison function for the private size-ordered tree using 64-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
__attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
typedef struct metaslab_rt_arg {
zfs_btree_t *mra_bt;
uint32_t mra_floor_shift;
} metaslab_rt_arg_t;
struct mssa_arg {
range_tree_t *rt;
metaslab_rt_arg_t *mra;
};
static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
struct mssa_arg *mssap = arg;
range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
range_seg_max_t seg = {0};
rs_set_start(&seg, rt, start);
rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
}
static void
metaslab_size_tree_full_load(range_tree_t *rt)
{
metaslab_rt_arg_t *mrap = rt->rt_arg;
METASLABSTAT_BUMP(metaslabstat_reload_tree);
ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
mrap->mra_floor_shift = 0;
struct mssa_arg arg = {0};
arg.rt = rt;
arg.mra = mrap;
range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
range_seg32_t, metaslab_rangesize32_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
range_seg64_t, metaslab_rangesize64_compare)
/*
* Create any block allocator specific components. The current allocators
* rely on using both a size-ordered range_tree_t and an array of uint64_t's.
*/
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
size_t size;
int (*compare) (const void *, const void *);
bt_find_in_buf_f bt_find;
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = metaslab_rangesize32_compare;
bt_find = metaslab_rt_find_rangesize32_in_buf;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = metaslab_rangesize64_compare;
bt_find = metaslab_rt_find_rangesize64_in_buf;
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
zfs_btree_create(size_tree, compare, bt_find, size);
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
(void) rt;
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_destroy(size_tree);
kmem_free(mrap, sizeof (*mrap));
}
static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
(1ULL << mrap->mra_floor_shift))
return;
zfs_btree_add(size_tree, rs);
}
static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
mrap->mra_floor_shift))
return;
zfs_btree_remove(size_tree, rs);
}
static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
metaslab_rt_create(rt, arg);
}
static const range_tree_ops_t metaslab_rt_ops = {
.rtop_create = metaslab_rt_create,
.rtop_destroy = metaslab_rt_destroy,
.rtop_add = metaslab_rt_add,
.rtop_remove = metaslab_rt_remove,
.rtop_vacate = metaslab_rt_vacate
};
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
zfs_btree_t *t = &msp->ms_allocatable_by_size;
range_seg_t *rs;
if (t == NULL)
return (0);
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL)
return (0);
return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
msp->ms_allocatable));
}
/*
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_unflushed_frees == NULL)
return (0);
if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_unflushed_frees);
range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
NULL);
if (rs == NULL)
return (0);
/*
* When a range is freed from the metaslab, that range is added to
* both the unflushed frees and the deferred frees. While the block
* will eventually be usable, if the metaslab were loaded the range
* would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
* txgs had passed. As a result, when attempting to estimate an upper
* bound for the largest currently-usable free segment in the
* metaslab, we need to not consider any ranges currently in the defer
* trees. This algorithm approximates the largest available chunk in
* the largest range in the unflushed_frees tree by taking the first
* chunk. While this may be a poor estimate, it should only remain so
* briefly and should eventually self-correct as frees are no longer
* deferred. Similar logic applies to the ms_freed tree. See
* metaslab_load() for more details.
*
* There are two primary sources of inaccuracy in this estimate. Both
* are tolerated for performance reasons. The first source is that we
* only check the largest segment for overlaps. Smaller segments may
* have more favorable overlaps with the other trees, resulting in
* larger usable chunks. Second, we only look at the first chunk in
* the largest segment; there may be other usable chunks in the
* largest segment, but we ignore them.
*/
uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
rsize, &start, &size);
if (found) {
if (rstart == start)
return (0);
rsize = start - rstart;
}
}
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
rsize, &start, &size);
if (found)
rsize = start - rstart;
return (rsize);
}
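/*
 * Worked example of the trimming above (hypothetical numbers): if the
 * largest unflushed-free segment is [100M, 120M) and one defer tree
 * holds [110M, 112M), range_tree_find_in() returns start = 110M, so
 * rsize is trimmed to 110M - 100M = 10M. Had the overlap started
 * exactly at rstart, the estimate would collapse to 0.
 */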
static range_seg_t *
metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
range_seg_t *rs;
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, start + size);
rs = zfs_btree_find(t, &rsearch, where);
if (rs == NULL) {
rs = zfs_btree_next(t, where, where);
}
return (rs);
}
#if defined(WITH_DF_BLOCK_ALLOCATOR) || \
defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* This is a helper function that can be used by the allocator to find a
* suitable block to allocate. This will search the specified B-tree looking
* for a block that matches the specified criteria.
*/
static uint64_t
metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
uint64_t max_search)
{
if (*cursor == 0)
*cursor = rt->rt_start;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
uint64_t first_found;
int count_searched = 0;
if (rs != NULL)
first_found = rs_get_start(rs, rt);
while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
max_search || count_searched < metaslab_min_search_count)) {
uint64_t offset = rs_get_start(rs, rt);
if (offset + size <= rs_get_end(rs, rt)) {
*cursor = offset + size;
return (offset);
}
rs = zfs_btree_next(bt, &where, &where);
count_searched++;
}
*cursor = 0;
return (-1ULL);
}
#endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Dynamic Fit (df) block allocator
*
* Search for a free chunk of at least this size, starting from the last
* offset (for this alignment of block) looking for up to
* metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
* found within 16MB, then return a free chunk of exactly the requested size (or
* larger).
*
* If it seems like searching from the last offset will be unproductive, skip
* that and just return a free chunk of exactly the requested size (or larger).
* This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
* mechanism is probably not very useful and may be removed in the future.
*
* The behavior when not searching can be changed to return the largest free
* chunk, instead of a free chunk of exactly the requested size, by setting
* metaslab_df_use_largest_segment.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
 * Find the largest power of 2 block size that evenly divides the
 * requested size. This is used to try to allocate blocks with similar
 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket), but it does not prevent allocations of other sizes
 * from being made within the same region.
 */
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
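/*
 * E.g. (illustrative): size = 24K = 0x6000 has lowest set bit 0x2000,
 * so align = 8K and the cursor bucket is
 * ms_lbas[highbit64(0x2000) - 1] = ms_lbas[13], shared with every
 * allocation whose size is an odd multiple of 8K.
 */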
range_tree_t *rt = msp->ms_allocatable;
uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
uint64_t offset;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're running low on space, find a segment based on size,
* rather than iterating based on offset.
*/
if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
offset = -1;
} else {
offset = metaslab_block_picker(rt,
cursor, size, metaslab_df_max_search);
}
if (offset == -1) {
range_seg_t *rs;
if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
if (metaslab_df_use_largest_segment) {
/* use largest free segment */
rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
} else {
zfs_btree_index_t where;
/* use segment of this size, or next largest */
rs = metaslab_block_find(&msp->ms_allocatable_by_size,
rt, msp->ms_start, size, &where);
}
if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
rt)) {
offset = rs_get_start(rs, rt);
*cursor = offset + size;
}
}
return (offset);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_df_alloc
};
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
range_seg_t *rs;
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
size)
return (-1ULL);
*cursor = rs_get_start(rs, rt);
*cursor_end = rs_get_end(rs, rt);
}
offset = *cursor;
*cursor += size;
return (offset);
}
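/*
 * Illustrative trace (hypothetical values): after selecting a largest
 * segment of [1M, 5M), *cursor = 1M and *cursor_end = 5M. Four
 * successive 1M allocations return 1M through 4M while merely
 * advancing the cursor; the fifth exhausts the range and triggers a
 * new zfs_btree_last() lookup.
 */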
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_cf_alloc
};
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
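/*
 * E.g. (illustrative): for an 8K allocation, hbit = highbit64(8K) =
 * 14, so if the cursor-based lookup fails the fallback searches the
 * size-sorted tree for a segment of roughly
 * MIN(max_size, 1 << (14 + 4)) = 256K, or the next larger one.
 */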
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
zfs_btree_t *t = &msp->ms_allocatable->rt_root;
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (max_size < size)
return (-1ULL);
rs_set_start(&rsearch, rt, *cursor);
rs_set_end(&rsearch, rt, *cursor + size);
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
t = &msp->ms_allocatable_by_size;
rs_set_start(&rsearch, rt, 0);
rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
metaslab_ndf_clump_shift)));
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL)
rs = zfs_btree_next(t, &where, &where);
ASSERT(rs != NULL);
}
if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
*cursor = rs_get_start(rs, rt) + size;
return (rs_get_start(rs, rt));
}
return (-1ULL);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_ndf_alloc
};
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
/*
* Wait for any in-progress flushing to complete.
*/
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_flushing)
cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
}
static unsigned int
metaslab_idx_func(multilist_t *ml, void *arg)
{
metaslab_t *msp = arg;
/*
 * ms_id values are allocated sequentially, so a full 64-bit
 * division would be a waste of time; limit it to 32 bits.
 */
return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
}
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
return (msp->ms_allocated_space);
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocating = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_condensing);
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an
* allocated space map. Calling this in non-syncing context
* does not provide a consistent view of the metaslab since
* we're performing allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
/*
 * Even though the smp_alloc field can go negative in general,
 * for a metaslab's space map that should never be the case.
*/
ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
ASSERT3U(space_map_allocated(msp->ms_sm), >=,
range_tree_space(msp->ms_unflushed_frees));
ASSERT3U(metaslab_allocated_space(msp), ==,
space_map_allocated(msp->ms_sm) +
range_tree_space(msp->ms_unflushed_allocs) -
range_tree_space(msp->ms_unflushed_frees));
sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
/*
* Account for future allocations since we would have
* already deducted that space from the ms_allocatable.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocating +=
range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
}
ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
msp->ms_allocating_total);
ASSERT3U(msp->ms_deferspace, ==,
range_tree_space(msp->ms_defer[0]) +
range_tree_space(msp->ms_defer[1]));
msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
msp->ms_deferspace + range_tree_space(msp->ms_freed);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
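/*
 * The identity verified above, written out:
 *
 *   allocated = space_map_allocated(ms_sm)
 *             + unflushed_allocs - unflushed_frees
 *   ms_size - allocated == allocatable + allocating
 *                        + deferspace + freed
 *
 * i.e. every byte of the metaslab is accounted for exactly once
 * between the on-disk space map and the in-core range trees.
 */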
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
/*
* Auxiliary histograms are only cleared when resetting them,
* which can only happen while the metaslab is loaded.
*/
ASSERT(msp->ms_loaded);
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
}
static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
range_tree_t *rt)
{
/*
* This is modeled after space_map_histogram_add(), so refer to that
* function for implementation details. We want this to work like
* the space map histogram, and not the range tree histogram, as we
* are essentially constructing a delta that will be later subtracted
* from the space map histogram.
*/
int idx = 0;
for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
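/*
 * Worked example (illustrative): with sm_shift = 9 and
 * SPACE_MAP_HISTOGRAM_SIZE = 32, range tree bucket i = 9 maps to
 * histogram[0] and i = 40 to histogram[31]. For i > 40 the index pins
 * at 31 and bucket i contributes rt_histogram[i] << (i - 40),
 * preserving total space rather than segment counts.
 */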
/*
 * Called in every sync pass in which the metaslab gets synced.
 *
 * The reason is that we want our auxiliary histograms to be updated
 * wherever the metaslab's space map histogram is updated. This way
 * we stay consistent on which parts of the metaslab space map's
 * histogram are currently not available for allocations (e.g. because
 * they are in the defer, freed, and freeing trees).
 */
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(sm != NULL);
/*
* This is similar to the metaslab's space map histogram updates
* that take place in metaslab_sync(). The only difference is that
* we only care about segments that haven't made it into the
* ms_allocatable tree yet.
*/
if (msp->ms_loaded) {
metaslab_aux_histograms_clear(msp);
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freed);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
metaslab_aux_histogram_add(msp->ms_deferhist[t],
sm->sm_shift, msp->ms_defer[t]);
}
}
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freeing);
}
/*
* Called every time we are done syncing (writing to) the metaslab,
* i.e. at the end of each sync pass.
* [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
*/
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
if (sm == NULL) {
/*
* We came here from metaslab_init() when creating/opening a
* pool, looking at a metaslab that hasn't had any allocations
* yet.
*/
return;
}
/*
* This is similar to the actions that we take for the ms_freed
* and ms_defer trees in metaslab_sync_done().
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
if (defer_allowed) {
memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
sizeof (msp->ms_synchist));
} else {
memset(msp->ms_deferhist[hist_index], 0,
sizeof (msp->ms_deferhist[hist_index]));
}
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
}
/*
* Ensure that the metaslab's weight and fragmentation are consistent
* with the contents of the histogram (either the range tree's histogram
 * or the space map's, depending on whether the metaslab is loaded).
*/
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can end up here from vdev_remove_complete(), in which case we
* cannot do these assertions because we hold spa config locks and
* thus we are not allowed to read from the DMU.
*
* We check if the metaslab group has been removed and if that's
* the case we return immediately as that would mean that we are
* here from the aforementioned code path.
*/
if (msp->ms_group == NULL)
return;
/*
* Devices being removed always return a weight of 0 and leave
* fragmentation and ms_max_size as is - there is nothing for
* us to verify here.
*/
vdev_t *vd = msp->ms_group->mg_vd;
if (vd->vdev_removing)
return;
/*
* If the metaslab is dirty it probably means that we've done
* some allocations or frees that have changed our histograms
* and thus the weight.
*/
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&vd->vdev_ms_list, msp, t))
return;
}
/*
* This verification checks that our in-memory state is consistent
* with what's on disk. If the pool is read-only then there aren't
* any changes and we just have the initially-loaded state.
*/
if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
return;
/* Do some extra verification on the in-core tree if we can. */
if (msp->ms_loaded) {
range_tree_stat_verify(msp->ms_allocatable);
VERIFY(space_map_histogram_verify(msp->ms_sm,
msp->ms_allocatable));
}
uint64_t weight = msp->ms_weight;
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
uint64_t frag = msp->ms_fragmentation;
uint64_t max_segsize = msp->ms_max_size;
msp->ms_weight = 0;
msp->ms_fragmentation = 0;
/*
* This function is used for verification purposes and thus should
* not introduce any side-effects/mutations on the system's state.
*
* Regardless of whether metaslab_weight() thinks this metaslab
* should be active or not, we want to ensure that the actual weight
* (and therefore the value of ms_weight) would be the same if it
* was to be recalculated at this point.
*
* In addition we set the nodirty flag so metaslab_weight() does
* not dirty the metaslab for future TXGs (e.g. when trying to
* force condensing to upgrade the metaslab spacemaps).
*/
msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
VERIFY3U(max_segsize, ==, msp->ms_max_size);
/*
* If the weight type changed then there is no point in doing
* verification. Revert fields to their original values.
*/
if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
(!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
msp->ms_fragmentation = frag;
msp->ms_weight = weight;
return;
}
VERIFY3U(msp->ms_fragmentation, ==, frag);
VERIFY3U(msp->ms_weight, ==, weight);
}
/*
* If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
* this class that was used longest ago, and attempt to unload it. We don't
* want to spend too much time in this loop to prevent performance
* degradation, and we expect that most of the time this operation will
* succeed. Between that and the normal unloading processing during txg sync,
* we expect this to keep the metaslab memory usage under control.
*/
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
uint64_t allmem = arc_all_memory();
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
uint_t tries = 0;
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
tries++) {
unsigned int idx = multilist_get_random_index(
&mc->mc_metaslab_txg_list);
multilist_sublist_t *mls =
multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
inuse * size) {
VERIFY3P(mls, ==, multilist_sublist_lock(
&mc->mc_metaslab_txg_list, idx));
ASSERT3U(idx, ==,
metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
if (!multilist_link_active(&msp->ms_class_txg_node)) {
multilist_sublist_unlock(mls);
break;
}
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
/*
* If the metaslab is currently loading there are two
* cases. If it's the metaslab we're evicting, we
* can't continue on or we'll panic when we attempt to
* recursively lock the mutex. If it's another
* metaslab that's loading, it can be safely skipped,
* since we know it's very new and therefore not a
* good eviction candidate. We check later once the
* lock is held that the metaslab is fully loaded
* before actually unloading it.
*/
if (msp->ms_loading) {
msp = next_msp;
inuse =
spl_kmem_cache_inuse(zfs_btree_leaf_cache);
continue;
}
/*
* We can't unload metaslabs with no spacemap because
* they're not ready to be unloaded yet. We can't
* unload metaslabs with outstanding allocations
* because doing so could cause the metaslab's weight
* to decrease while it's unloaded, which violates an
* invariant that we use to prevent unnecessary
* loading. We also don't unload metaslabs that are
* currently active because they are high-weight
* metaslabs that are likely to be used in the near
* future.
*/
mutex_enter(&msp->ms_lock);
if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
msp->ms_allocating_total == 0) {
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
#else
(void) mc, (void) zfs_metaslab_mem_limit;
#endif
}
static int
metaslab_load_impl(metaslab_t *msp)
{
int error = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We temporarily drop the lock to unblock other operations while we
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
* If we are using the log space maps, metaslab_sync() can't write to
* the metaslab's space map while we are loading as we only write to
* it when we are flushing the metaslab, and that can't happen while
* we are loading it.
*
* If we are not using log space maps though, metaslab_sync() can
* append to the space map while we are loading. Therefore we load
* only entries that existed when we started the load. Additionally,
* metaslab_sync_done() has to wait for the load to complete because
* there are potential races like metaslab_load() loading parts of the
* space map that are currently being appended by metaslab_sync(). If
* we didn't, the ms_allocatable would have entries that
* metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
* ignoring entries appended by metaslab_sync() that happen after we
* drop the lock.
*/
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
hrtime_t load_start = gethrtime();
metaslab_rt_arg_t *mrap;
if (msp->ms_allocatable->rt_arg == NULL) {
mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
} else {
mrap = msp->ms_allocatable->rt_arg;
msp->ms_allocatable->rt_ops = NULL;
msp->ms_allocatable->rt_arg = NULL;
}
mrap->mra_bt = &msp->ms_allocatable_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
/* Now, populate the size-sorted tree. */
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
struct mssa_arg arg = {0};
arg.rt = msp->ms_allocatable;
arg.mra = mrap;
range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
&arg);
} else {
/*
* Add the size-sorted tree first, since we don't need to load
* the metaslab from the spacemap.
*/
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
/*
* The space map has not been allocated yet, so treat
* all the space in the metaslab as free and add it to the
* ms_allocatable tree.
*/
range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
if (msp->ms_new) {
/*
* If the ms_sm doesn't exist, this means that this
* metaslab hasn't gone through metaslab_sync() and
* thus has never been dirtied. So we shouldn't
* expect any unflushed allocs or frees from previous
* TXGs.
*/
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
* changing the ms_sm (or log_sm) and the metaslab's range trees
* while we are about to use them and populate the ms_allocatable.
* The ms_lock is insufficient for this because metaslab_sync() doesn't
* hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
ASSERT(!msp->ms_condensing);
ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
return (error);
}
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
/*
* Apply all the unflushed changes to ms_allocatable right
* away so any manipulations we do below have a clear view
* of what is allocated and what is free.
*/
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_remove, msp->ms_allocatable);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_add, msp->ms_allocatable);
ASSERT3P(msp->ms_group, !=, NULL);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (spa_syncing_log_sm(spa) != NULL) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LOG_SPACEMAP));
/*
* If we use a log space map we add all the segments
* that are in ms_unflushed_frees so they are available
* for allocation.
*
* ms_allocatable needs to contain all free segments
* that are ready for allocations (thus not segments
* from ms_freeing, ms_freed, and the ms_defer trees).
* But if we grab the lock in this code path at a sync
 * pass later than 1, then it also contains the
* segments of ms_freed (they were added to it earlier
* in this path through ms_unflushed_frees). So we
* need to remove all the segments that exist in
* ms_freed from ms_allocatable as they will be added
* later in metaslab_sync_done().
*
* When there's no log space map, the ms_allocatable
* correctly doesn't contain any segments that exist
* in ms_freed [see ms_synced_length].
*/
range_tree_walk(msp->ms_freed,
range_tree_remove, msp->ms_allocatable);
}
/*
* If we are not using the log space map, ms_allocatable
* contains the segments that exist in the ms_defer trees
* [see ms_synced_length]. Thus we need to remove them
* from ms_allocatable as they will be added again in
* metaslab_sync_done().
*
* If we are using the log space map, ms_allocatable still
* contains the segments that exist in the ms_defer trees.
* Not because it read them through the ms_sm though. But
* because these segments are part of ms_unflushed_frees
* whose segments we add to ms_allocatable earlier in this
* code path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_remove, msp->ms_allocatable);
}
/*
* Call metaslab_recalculate_weight_and_sort() now that the
* metaslab is loaded so we get the metaslab's real weight.
*
* Unless this metaslab was created with older software and
* has not yet been converted to use segment-based weight, we
* expect the new weight to be better or equal to the weight
* that the metaslab had while it was not loaded. This is
* because the old weight does not take into account the
* consolidation of adjacent segments between TXGs. [see
* comment for ms_synchist and ms_deferhist[] for more info]
*/
uint64_t weight = msp->ms_weight;
uint64_t max_size = msp->ms_max_size;
metaslab_recalculate_weight_and_sort(msp);
if (!WEIGHT_IS_SPACEBASED(weight))
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_largest_allocatable(msp);
ASSERT3U(max_size, <=, msp->ms_max_size);
hrtime_t load_end = gethrtime();
msp->ms_load_time = load_end;
zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, smp_length %llu, "
"unflushed_allocs %llu, unflushed_frees %llu, "
"freed %llu, defer %llu + %llu, unloaded time %llu ms, "
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)range_tree_space(msp->ms_freed),
(u_longlong_t)range_tree_space(msp->ms_defer[0]),
(u_longlong_t)range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
(u_longlong_t)msp->ms_max_size,
(u_longlong_t)msp->ms_max_size - max_size,
(u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
return (0);
}
int
metaslab_load(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* There may be another thread loading the same metaslab, if that's
* the case just wait until the other thread is done and return.
*/
metaslab_load_wait(msp);
if (msp->ms_loaded)
return (0);
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We set the loading flag BEFORE potentially dropping the lock to
* wait for an ongoing flush (see ms_flushing below). This way other
* threads know that there is already a thread that is loading this
* metaslab.
*/
msp->ms_loading = B_TRUE;
/*
* Wait for any in-progress flushing to finish as we drop the ms_lock
* both here (during space_map_load()) and in metaslab_flush() (when
* we flush our changes to the ms_sm).
*/
if (msp->ms_flushing)
metaslab_flush_wait(msp);
/*
 * In case we were waiting for the metaslab to be flushed (where we
 * temporarily dropped the ms_lock), ensure that no one else loaded
 * the metaslab in the meantime.
 */
ASSERT(!msp->ms_loaded);
/*
* If we're loading a metaslab in the normal class, consider evicting
* another one to keep our memory usage under the limit defined by the
* zfs_metaslab_mem_limit tunable.
*/
if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
msp->ms_group->mg_class) {
metaslab_potentially_evict(msp->ms_group->mg_class);
}
int error = metaslab_load_impl(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* This can happen if a metaslab is selected for eviction (in
* metaslab_potentially_evict) and then unloaded during spa_sync (via
* metaslab_class_evict_old).
*/
if (!msp->ms_loaded)
return;
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_unload_time = gethrtime();
msp->ms_activation_weight = 0;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
if (msp->ms_group != NULL) {
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)msp->ms_weight,
(u_longlong_t)msp->ms_selected_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_selected_time) / 1000 / 1000,
(u_longlong_t)msp->ms_alloc_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_load_time) / 1000 / 1000,
(u_longlong_t)msp->ms_max_size);
}
/*
 * We explicitly recalculate the metaslab's weight based on its space
 * map (as it is now not loaded). We want unloaded metaslabs to always
 * have their weights calculated from the space map histograms, while
 * loaded ones have it calculated from their in-core range tree
 * [see metaslab_load()]. This way, the weight reflects the information
 * available in-core, whether it is loaded or not.
 *
 * If ms_group == NULL, it means that we came here from metaslab_fini(),
 * at which point it doesn't make sense for us to do the recalculation
 * and the sorting.
 */
if (msp->ms_group != NULL)
metaslab_recalculate_weight_and_sort(msp);
}
/*
* We want to optimize the memory use of the per-metaslab range
* trees. To do this, we store the segments in the range trees in
* units of sectors, zero-indexing from the start of the metaslab. If
 * vdev_ms_shift - vdev_ashift is less than 32, we can store
* the ranges using two uint32_ts, rather than two uint64_ts.
*/
range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
uint64_t *start, uint64_t *shift)
{
if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
!zfs_metaslab_force_large_segs) {
*shift = vdev->vdev_ashift;
*start = msp->ms_start;
return (RANGE_SEG32);
} else {
*shift = 0;
*start = 0;
return (RANGE_SEG64);
}
}
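/*
 * E.g. (illustrative): a 16GiB metaslab (vdev_ms_shift = 34) on an
 * ashift = 12 vdev stores offsets in 4KiB units relative to ms_start;
 * since 34 - 12 = 22 < 32, RANGE_SEG32 suffices and each segment
 * costs two uint32_ts. RANGE_SEG64 is only needed when a metaslab
 * spans 2^32 or more sectors, or when zfs_metaslab_force_large_segs
 * is set.
 */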
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
msp->ms_selected_txg = txg;
msp->ms_selected_time = gethrtime();
multilist_sublist_insert_tail(mls, msp);
multilist_sublist_unlock(mls);
}
void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
multilist_link_init(&ms->ms_class_txg_node);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
ms->ms_allocator = -1;
ms->ms_new = B_TRUE;
vdev_ops_t *ops = vd->vdev_ops;
if (ops->vdev_op_metaslab_init != NULL)
ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
/*
 * We only open space map objects that already exist. All others
 * will be opened when we finally allocate an object for them. For
 * readonly pools there is no need to open the space map object.
 *
 * Note:
 * When called from vdev_expand(), we can't call into the DMU as
 * we are holding the spa_config_lock as a writer and we would
 * deadlock [see relevant comment in vdev_metaslab_init()]. In
 * that case the object parameter is zero though, so we won't
 * call into the DMU.
 */
if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
!spa->spa_read_spacemaps)) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
}
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_SIZE; t++) {
ms->ms_allocating[t] = range_tree_create(NULL, type,
NULL, start, shift);
}
ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
start, shift);
}
ms->ms_checkpointing =
range_tree_create(NULL, type, NULL, start, shift);
ms->ms_unflushed_allocs =
range_tree_create(NULL, type, NULL, start, shift);
metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
type, mrap, start, shift);
ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms, B_FALSE);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL) {
metaslab_sync_done(ms, 0);
metaslab_space_update(vd, mg->mg_class,
metaslab_allocated_space(ms), 0, 0);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
static void
metaslab_fini_flush_data(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (metaslab_unflushed_txg(msp) == 0) {
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
}
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
metaslab_unflushed_dirty(msp));
}
uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
range_tree_numsegs(ms->ms_unflushed_frees)) *
ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet its
* space hasn't been accounted for in its vdev and doesn't need to be
* subtracted.
*/
if (!msp->ms_new) {
metaslab_space_update(vd, mg->mg_class,
-metaslab_allocated_space(msp), 0, -msp->ms_size);
}
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
metaslab_unload(msp);
range_tree_destroy(msp->ms_allocatable);
range_tree_destroy(msp->ms_freeing);
range_tree_destroy(msp->ms_freed);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_allocs);
range_tree_destroy(msp->ms_checkpointing);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_frees);
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_allocating[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defer[t]);
}
ASSERT0(msp->ms_deferspace);
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
range_tree_vacate(msp->ms_trim, NULL, NULL);
range_tree_destroy(msp->ms_trim);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
kmem_free(msp, sizeof (metaslab_t));
}
#define FRAGMENTATION_TABLE_SIZE 17
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
* large segments. A 10% change in fragmentation equates to approximately
* double the number of segments.
*
* This table defines 0% fragmented space using 16MB segments. Testing has
* shown that segments that are greater than or equal to 16MB do not suffer
* from drastic performance problems. Using this value, we derive the rest
* of the table. Since the fragmentation value is never stored on disk, it
* is possible to change these calculations in the future.
*/
static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
100, /* 512B */
100, /* 1K */
98, /* 2K */
95, /* 4K */
90, /* 8K */
80, /* 16K */
70, /* 32K */
60, /* 64K */
50, /* 128K */
40, /* 256K */
30, /* 512K */
20, /* 1M */
15, /* 2M */
10, /* 4M */
5, /* 8M */
0 /* 16M */
};
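/*
 * Worked example (hypothetical histogram): a metaslab whose free
 * space is 100MB of 512K segments (factor 30) plus 100MB of 8M
 * segments (factor 5) scores (100 * 30 + 100 * 5) / 200 = 17,
 * whereas the same 200MB in segments of 16M and up would score 0.
 */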
/*
* Calculate the metaslab's fragmentation metric and set ms_fragmentation.
* Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
 * been upgraded and does not support this metric. Otherwise, the
 * computed value should be in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported. We also skip marking this metaslab for
* condensing if the caller has explicitly set nodirty.
*/
if (!nodirty &&
spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
(u_longlong_t)msp->ms_id,
(u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - metaslab_allocated_space(msp);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
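/*
 * E.g.: the formula above gives ms_id 0 the full 2x multiplier, the
 * middle metaslab roughly 1.5x, and the last metaslab
 * (ms_id = vdev_ms_count - 1) just over 1x.
 */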
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
 * has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
segments <<= 1;
segments += msp->ms_allocatable->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
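/*
 * Worked example (illustrative): with one 4M free segment (range tree
 * bucket 22) and two 1M segments (bucket 20), the loop breaks at the
 * highest occupied bucket and the weight encodes index 22, count 1.
 * The fold-down (segments <<= 1) only matters for buckets above
 * max_idx, where the range tree is more precise than the space map
 * histogram can represent.
 */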
/*
 * Calculate the weight based on the on-disk histogram. This should be
 * applied only to unloaded metaslabs (i.e. no incoming allocations) in
 * order to give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(!msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(space_map_object(sm), !=, 0);
ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* Create a joint histogram from all the segments that have made
* it to the metaslab's space map histogram, that are not yet
* available for allocation because they are still in the freeing
* pipeline (e.g. freeing, freed, and defer trees). Then subtract
* these segments from the space map's histogram to get a more
* accurate weight.
*/
uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
deferspace_histogram[i] += msp->ms_synchist[i];
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
deferspace_histogram[i] += msp->ms_deferhist[t][i];
}
}
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
deferspace_histogram[i]);
uint64_t count =
sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
if (count != 0) {
WEIGHT_SET_COUNT(weight, count);
WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Compute a segment-based weight for the specified metaslab. The weight
 * is determined by the highest occupied bucket in the histogram. The
 * information for that bucket is encoded into the weight value.
*/
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (metaslab_allocated_space(msp) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (metaslab_allocated_space(msp) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab is loaded, then we can determine if the desired allocation
* can be satisfied by looking at the size of the maximum free segment
* on that metaslab. Otherwise, we make our decision based on the metaslab's
* weight. For segment-based weighting we can determine the maximum
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
* If the metaslab is loaded, ms_max_size is definitive and we can use
* the fast check. If it's not, the ms_max_size is a lower bound (once
* set), and we should use the fast check as long as we're not in
* try_hard and it's been less than zfs_metaslab_max_size_cache_sec
* seconds since the metaslab was unloaded.
*/
if (msp->ms_loaded ||
(msp->ms_max_size != 0 && !try_hard && gethrtime() <
msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
return (msp->ms_max_size >= asize);
boolean_t should_allocate;
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_set_fragmentation(msp, nodirty);
/*
* Update the maximum size. If the metaslab is loaded, this will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree. If the metaslab is
* unloaded, we check if there's a larger free segment in the
* unflushed frees. This is a lower bound on the largest allocatable
* segment size. Coalescing of adjacent entries may reveal larger
* allocatable segments, but we aren't aware of those until loading
* the space map into a range tree.
*/
if (msp->ms_loaded) {
msp->ms_max_size = metaslab_largest_allocatable(msp);
} else {
msp->ms_max_size = MAX(msp->ms_max_size,
metaslab_largest_unflushed_free(msp));
}
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/* note: we preserve the mask (e.g. indication of primary, etc..) */
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(msp->ms_group, msp,
metaslab_weight(msp, B_FALSE) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
int allocator, uint64_t activation_weight)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're activating for the claim code, we don't want to actually
* set the metaslab up for a specific allocator.
*/
if (activation_weight == METASLAB_WEIGHT_CLAIM) {
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(mg, msp, msp->ms_weight |
activation_weight);
return (0);
}
metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
&mga->mga_primary : &mga->mga_secondary);
mutex_enter(&mg->mg_lock);
if (*mspp != NULL) {
mutex_exit(&mg->mg_lock);
return (EEXIST);
}
*mspp = msp;
ASSERT3S(msp->ms_allocator, ==, -1);
msp->ms_allocator = allocator;
msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort_impl(mg, msp,
msp->ms_weight | activation_weight);
mutex_exit(&mg->mg_lock);
return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The current metaslab is already activated for us so there
* is nothing to do. Being activated, though, does not mean
* that it is activated for our allocator or with our
* requested activation weight. The metaslab could have started
* out active for our allocator but changed allocators while we
* were waiting to grab its ms_lock, or we stole it
* [see find_valid_metaslab()]. As a result, this thread may
* end up passivating a metaslab that belongs to another
* allocator or that carries a different activation mask.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
ASSERT(msp->ms_loaded);
return (0);
}
int error = metaslab_load(msp);
if (error != 0) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
/*
* When entering metaslab_load() we may have dropped the
* ms_lock because we were loading this metaslab, or we
* were waiting for another thread to load it for us. In
* that scenario, we recheck the weight of the metaslab
* to see if it was activated by another thread.
*
* If the metaslab was activated for another allocator or
* it was activated with a different activation weight (e.g.
* we wanted to make it a primary but it was activated as
* secondary) we return error (EBUSY).
*
* If the metaslab was activated for the same allocator
* and requested activation mask, skip activating it.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
if (msp->ms_allocator != allocator)
return (EBUSY);
if ((msp->ms_weight & activation_weight) == 0)
return (SET_ERROR(EBUSY));
EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
return (0);
}
/*
* If the metaslab has literally 0 space, it will have weight 0. In
* that case, don't bother activating it. This can happen if the
* metaslab had space during find_valid_metaslab, but another thread
* loaded it and used all that space while we were waiting to grab the
* lock.
*/
if (msp->ms_weight == 0) {
ASSERT0(range_tree_space(msp->ms_allocatable));
return (SET_ERROR(ENOSPC));
}
if ((error = metaslab_activate_allocator(msp->ms_group, msp,
allocator, activation_weight)) != 0) {
return (error);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
metaslab_group_sort(mg, msp, weight);
return;
}
mutex_enter(&mg->mg_lock);
ASSERT3P(msp->ms_group, ==, mg);
ASSERT3S(0, <=, msp->ms_allocator);
ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
if (msp->ms_primary) {
ASSERT3P(mga->mga_primary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
mga->mga_primary = NULL;
} else {
ASSERT3P(mga->mga_secondary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
mga->mga_secondary = NULL;
}
msp->ms_allocator = -1;
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_allocatable) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
ASSERT(msp->ms_activation_weight != 0);
msp->ms_activation_weight = 0;
metaslab_passivate_allocator(msp->ms_group, msp, weight);
ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
metaslab so that we don't dirty more blocks and cause more sync passes.
*/
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
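/*
 * Illustrative example: if the metaslab was activated at weight
 * index 20 and zfs_metaslab_switch_threshold is 2, it stays active
 * while the in-core histogram still shows free segments in buckets
 * 19 or 20, and is proactively passivated once the highest occupied
 * bucket drops to 18 or below.
 */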
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
metaslab_class_t *mc = msp->ms_group->mg_class;
spa_t *spa = mc->mc_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
mutex_enter(&msp->ms_lock);
(void) metaslab_load(msp);
metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
taskq_wait_outstanding(mg->mg_taskq, 0);
return;
}
mutex_enter(&mg->mg_lock);
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
ASSERT3P(msp->ms_group, ==, mg);
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
msp, TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance for
* inefficiency. We would like to use the following criteria to make our
* decision:
*
* 1. Do not condense if the size of the space map object would dramatically
* increase as a result of writing out the free space range tree.
*
* 2. Condense if the on-disk space map representation is at least
* zfs_condense_pct/100 times the size of the optimal representation
* (e.g. with zfs_condense_pct = 110 and an optimal size of 1MB,
* condense once the on-disk size reaches 1.1MB).
*
* 3. Do not condense if the on-disk size of the space map does not actually
* decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
* which a condense request has been made.
*/
if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
msp->ms_condense_wanted)
return (B_TRUE);
uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
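/*
 * Illustrative numbers: with zfs_condense_pct = 110, a space map
 * whose on-disk length is 1.2MB against an estimated optimal size
 * of 1MB passes the first test (1.2MB >= 1.1MB). It is condensed
 * only if it also passes the block threshold test, e.g. with a
 * 128K record_size and a threshold of 4 blocks the on-disk length
 * must exceed 512K, which 1.2MB does.
 */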
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed
* by the entries of the free range tree (ms_allocatable). The condensed
* spacemap contains all the entries of previous TXGs (including those in
* the pool-wide log spacemaps; thus this is effectively a superset of
* metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_sm != NULL);
/*
* In order to condense the space map, we need to change it so it
* only describes which segments are currently allocated and free.
*
* All the current free space resides in the ms_allocatable, all
* the ms_defer trees, and all the ms_allocating trees. We ignore
* ms_freed because it is empty because we're in sync pass 1. We
* ignore ms_freeing because these changes are not yet reflected
* in the spacemap (they will be written later this txg).
*
* So to truncate the space map to represent all the entries of
* previous TXGs we do the following:
*
* 1] We create a range tree (condense tree) that is 100% empty.
* 2] We add to it all segments found in the ms_defer trees
* as those segments are marked as free in the original space
* map. We do the same with the ms_allocating trees for the same
* reason. Adding these segments should be a relatively
* inexpensive operation since we expect these trees to have a
* small number of nodes.
* 3] We vacate any unflushed allocs, since they are not frees we
* need to add to the condense tree. Then we vacate any
* unflushed frees as they should already be part of ms_allocatable.
* 4] At this point, we would ideally like to add all segments
* in the ms_allocatable tree to the condense tree. This way
* we would write all the entries of the condense tree as the
* condensed space map, which would only contain freed
* segments with everything else assumed to be allocated.
*
* Doing so can be prohibitively expensive as ms_allocatable can
* be large, and therefore computationally expensive to add to
* the condense_tree. Instead we first sync out an entry marking
* everything as allocated, then the condense_tree and then the
* ms_allocatable, in the condensed space map. While this is not
* optimal, it is typically close to optimal and more importantly
* much cheaper to compute.
*
* 5] Finally, as both of the unflushed trees were written to our
* new and condensed metaslab space map, we basically flushed
* all the unflushed changes to disk, thus we call
* metaslab_flush_update().
*/
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %llu, forcing condense=%s",
(u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
range_seg_type_t type;
uint64_t shift, start;
type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
&start, &shift);
condense_tree = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_add, condense_tree);
}
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_add, condense_tree);
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
/*
* We're about to drop the metaslab's lock thus allowing other
* consumers to change its contents. Set the metaslab's ms_condensing
* flag to ensure that allocations on this metaslab do not occur
* while we're in the middle of committing it to disk. This is only
* critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
uint64_t object = space_map_object(msp->ms_sm);
space_map_truncate(sm,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
/*
* space_map_truncate() may have reallocated the spacemap object.
* If so, update the vdev_ms_array.
*/
if (space_map_object(msp->ms_sm) != object) {
object = space_map_object(msp->ms_sm);
dmu_write(spa->spa_meta_objset,
msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
/*
* Note:
* When the log space map feature is enabled, each space map will
* always have ALLOCS followed by FREES for each sync pass. This is
* typically true even when the log space map feature is disabled,
* except from the case where a metaslab goes through metaslab_sync()
* and gets condensed. In that case the metaslab's space map will have
* ALLOCS followed by FREES (due to condensing) followed by ALLOCS
* followed by FREES (due to space_map_write() in metaslab_sync()) for
* sync pass 1.
*/
range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
shift);
range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
range_tree_vacate(tmp_tree, NULL, NULL);
range_tree_destroy(tmp_tree);
mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
metaslab_flush_update(msp, tx);
}
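/*
 * To summarize the write sequence in metaslab_condense() above, the
 * condensed space map now reads, in order: one ALLOC entry covering
 * [ms_start, ms_start + ms_size), FREE entries for every
 * ms_allocatable segment, and FREE entries for every condense_tree
 * (defer + allocating) segment. Everything not explicitly freed is
 * implied to be allocated.
 */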
static void
metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
mutex_enter(&spa->spa_flushed_ms_lock);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, B_TRUE);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_increment_current_mscount(spa);
spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
}
void
metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
/* update metaslab's position in our flushing tree */
uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, dirty);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
/* update metaslab counts of spa_log_sm_t nodes */
spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_sm_increment_current_mscount(spa);
/* update log space map summary */
spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
ms_prev_flushed_dirty);
spa_log_summary_add_flushed_metaslab(spa, dirty);
/* cleanup obsolete logs if any */
spa_cleanup_old_sm_logs(spa, tx);
}
/*
* Called when the metaslab has been flushed (its own spacemap now reflects
* all the contents of the pool-wide spacemap log). Updates the metaslab's
* metadata and any pool-wide related log space map data (e.g. summary,
* obsolete logs, etc..) to reflect that.
*/
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
/*
* Just because a metaslab got flushed doesn't mean that it will
* pass through metaslab_sync_done(). Thus, make sure to
* update ms_synced_length here in case it doesn't.
*/
msp->ms_synced_length = space_map_length(msp->ms_sm);
/*
* We may end up here from metaslab_condense() without the
* feature being active. In that case this is a no-op.
*/
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
metaslab_unflushed_txg(msp) == 0)
return;
metaslab_unflushed_bump(msp, tx, B_FALSE);
}
boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
/*
* There is nothing wrong with flushing the same metaslab twice, as
* this codepath should handle that case. However, the current
* flushing scheme makes sure to avoid this situation as we would be
* making all these calls without having anything meaningful to write
* to disk. We assert this behavior here.
*/
ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
/*
* We can not flush while loading, because then we would
* not load the ms_unflushed_{allocs,frees}.
*/
if (msp->ms_loading)
return (B_FALSE);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
/*
* Metaslab condensing is effectively flushing. Therefore if the
* metaslab can be condensed we can just condense it instead of
* flushing it.
*
* Note that metaslab_condense() does call metaslab_flush_update()
* so we can just return immediately after condensing. We also
* don't need to care about setting ms_flushing or broadcasting
* ms_flush_cv, even if we temporarily drop the ms_lock in
* metaslab_condense(), as the metaslab is already loaded.
*/
if (msp->ms_loaded && metaslab_should_condense(msp)) {
metaslab_group_t *mg = msp->ms_group;
/*
* For all histogram operations below refer to the
* comments of metaslab_sync() where we follow a
* similar procedure.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
metaslab_condense(msp, tx);
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
ASSERT(range_tree_is_empty(msp->ms_freed));
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
/*
* Since we recreated the histogram (and potentially
* the ms_sm too while condensing) ensure that the
* weight is updated too because we are not guaranteed
* that this metaslab is dirty and will go through
* metaslab_sync_done().
*/
metaslab_recalculate_weight_and_sort(msp);
return (B_TRUE);
}
msp->ms_flushing = B_TRUE;
uint64_t sm_len_before = space_map_length(msp->ms_sm);
mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
uint64_t sm_len_after = space_map_length(msp->ms_sm);
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
"appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
metaslab_flush_update(msp, tx);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
msp->ms_flushing = B_FALSE;
cv_broadcast(&msp->ms_flush_cv);
return (B_TRUE);
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_new) {
ASSERT0(range_tree_space(alloctree));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
ASSERT0(range_tree_space(msp->ms_trim));
return;
}
/*
* Normally, we don't want to process a metaslab if there are no
* allocations or frees to perform. However, if the metaslab is being
* forced to condense, it's loaded and we're not beyond the final
* dirty txg, we need to let it through. Not condensing beyond the
* final dirty txg prevents an issue where metaslabs that need to be
* condensed but were loaded for other reasons could cause a panic
* here. By only checking the txg in that branch of the conditional,
* we preserve the utility of the VERIFY statements in all other
* cases.
*/
if (range_tree_is_empty(alloctree) &&
range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing) &&
!(msp->ms_loaded && msp->ms_condense_wanted &&
txg <= spa_final_dirty_txg(spa)))
return;
VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently
* with metaslab_sync() is the metaslab's ms_allocatable. No
* other thread can be modifying this txg's alloc, freeing,
* freed, or space_map_phys_t. We drop ms_lock whenever we
* could call into the DMU, because the DMU can call down to
* us (e.g. via zio_free()) at any time.
*
* The spa_vdev_remove_thread() can be reading metaslab state
* concurrently, and it is locked out by the ms_sync_lock.
* Note that the ms_lock is insufficient for this, because it
* is dropped by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
/*
* Generate a log space map if one doesn't exist already.
*/
spa_generate_syncing_log_sm(spa, tx);
if (msp->ms_sm == NULL) {
uint64_t new_object = space_map_alloc(mos,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log :
zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &new_object, tx);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
if (!range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* We save the space map object as an entry in vdev_top_zap
* so it can be retrieved when the pool is reopened after an
* export or through zdb.
*/
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (new_object), 1, &new_object, tx));
}
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
metaslab_should_condense(msp))
metaslab_condense(msp, tx);
/*
* We'll be going to disk to sync our space accounting, thus we
* drop the ms_lock during that time so allocations coming from
* open-context (ZIL) for future TXGs do not block.
*/
mutex_exit(&msp->ms_lock);
space_map_t *log_sm = spa_syncing_log_sm(spa);
if (log_sm != NULL) {
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
if (metaslab_unflushed_txg(msp) == 0)
metaslab_unflushed_add(msp, tx);
else if (!metaslab_unflushed_dirty(msp))
metaslab_unflushed_bump(msp, tx, B_TRUE);
space_map_write(log_sm, alloctree, SM_ALLOC,
vd->vdev_id, tx);
space_map_write(log_sm, msp->ms_freeing, SM_FREE,
vd->vdev_id, tx);
mutex_enter(&msp->ms_lock);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_remove_xor_add(alloctree,
msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
range_tree_remove_xor_add(msp->ms_freeing,
msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(msp);
} else {
ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
}
msp->ms_allocated_space += range_tree_space(alloctree);
ASSERT3U(msp->ms_allocated_space, >=,
range_tree_space(msp->ms_freeing));
msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
if (!range_tree_is_empty(msp->ms_checkpointing)) {
ASSERT(spa_has_checkpoint(spa));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
* ms_lock while writing to the checkpoint space map, for the
* same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
spa->spa_checkpoint_info.sci_dspace +=
range_tree_space(msp->ms_checkpointing);
vd->vdev_stat.vs_checkpoint_space +=
range_tree_space(msp->ms_checkpointing);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
-space_map_allocated(vd->vdev_checkpoint_sm));
range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
}
if (msp->ms_loaded) {
/*
* When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
*
* Keep in mind that even if we are currently using a log spacemap
* we want current frees to end up in the ms_allocatable (but not
* get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
ASSERT0(msp->ms_allocated_this_txg);
} else {
range_tree_vacate(msp->ms_freeing,
range_tree_add, msp->ms_freed);
}
msp->ms_allocated_this_txg += range_tree_space(alloctree);
range_tree_vacate(alloctree, NULL, NULL);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
& TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
mutex_exit(&msp->ms_lock);
/*
* Verify that the space map object ID has been recorded in the
* vdev_ms_array.
*/
uint64_t object;
VERIFY0(dmu_read(mos, vd->vdev_ms_array,
msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
VERIFY3U(object, ==, space_map_object(msp->ms_sm));
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
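/*
 * Release an idle metaslab's in-core state: if it is loaded and not
 * disabled, verify that no allocations are pending for future TXGs,
 * passivate it if it is still attached to an allocator, and unload
 * it unless metaslab_debug_unload is set.
 */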
static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
if (!msp->ms_loaded || msp->ms_disabled != 0)
return;
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space(
msp->ms_allocating[(txg + t) & TXG_MASK]));
}
if (msp->ms_allocator != -1)
metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
if (msp->ms_new) {
/* this is a new metaslab, add its capacity to the vdev */
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
/* there should be no allocations nor frees at this point */
VERIFY0(msp->ms_allocated_this_txg);
VERIFY0(range_tree_space(msp->ms_freed));
}
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
defer_allowed = B_FALSE;
}
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
range_tree_space(msp->ms_freed);
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freed) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
if (spa_syncing_log_sm(spa) == NULL) {
/*
* If there's a metaslab_load() in progress and we don't have
* a log space map, it means that we probably wrote to the
* metaslab's space map. If this is the case, we need to
* make sure that we wait for the load to complete so that we
* have a consistent view at the in-core side of the metaslab.
*/
metaslab_load_wait(msp);
} else {
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
}
/*
* When auto-trimming is enabled, free ranges which are added to
* ms_allocatable are also added to ms_trim. The ms_trim tree is
* periodically consumed by the vdev_autotrim_thread() which issues
* trims for all ranges and then vacates the tree. The ms_trim tree
* can be discarded at any time with the sole consequence of recent
* frees not being trimmed.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
if (!defer_allowed) {
range_tree_walk(msp->ms_freed, range_tree_add,
msp->ms_trim);
}
} else {
range_tree_vacate(msp->ms_trim, NULL, NULL);
}
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
* just emptied out the defer_tree.
*/
range_tree_vacate(*defer_tree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
if (defer_allowed) {
range_tree_swap(&msp->ms_freed, defer_tree);
} else {
range_tree_vacate(msp->ms_freed,
msp->ms_loaded ? range_tree_add : NULL,
msp->ms_allocatable);
}
msp->ms_synced_length = space_map_length(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
metaslab_aux_histograms_update_done(msp, defer_allowed);
if (msp->ms_new) {
msp->ms_new = B_FALSE;
mutex_enter(&mg->mg_lock);
mg->mg_ms_ready++;
mutex_exit(&mg->mg_lock);
}
/*
* Re-sort metaslab within its group now that we've adjusted
* its allocatable space.
*/
metaslab_recalculate_weight_and_sort(msp);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
msp->ms_allocating_total -= msp->ms_allocated_this_txg;
msp->ms_allocated_this_txg = 0;
mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
/*
* Preload the next potential metaslabs but only on active
* metaslab groups. We can get into a state where the metaslab
* is no longer active since we dirty metaslabs as we remove a
* device, thus potentially making the metaslab group eligible
* for preloading.
*/
if (mg->mg_activation_count > 0) {
metaslab_group_preload(mg);
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
* When writing a ditto block (i.e. more than one DVA for a given BP) on
* the same vdev as an existing DVA of this BP, then try to allocate it
* on a different metaslab than existing DVAs (i.e. a unique metaslab).
*/
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
uint64_t dva_ms_id;
if (DVA_GET_ASIZE(dva) == 0)
return (B_TRUE);
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (B_TRUE);
dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
return (msp->ms_id != dva_ms_id);
}
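/*
 * Example (assumed values): on a vdev with vdev_ms_shift = 34
 * (16GB metaslabs), a DVA at offset 0x900000000 maps to metaslab
 * id 0x900000000 >> 34 = 2, so a candidate metaslab on that vdev is
 * unique only if its ms_id is not 2.
 */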
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
int allocator)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
* entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
mat->mat_allocator = allocator;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
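/*
 * Typical lifecycle (illustrative): a zio initializes its allocation
 * trace list with metaslab_trace_init(), every allocation attempt
 * appends an entry through metaslab_trace_add() -- recording either
 * the chosen offset or a TRACE_* error code -- and
 * metaslab_trace_fini() tears the list down when the zio completes.
 */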
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}
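/*
 * Lock-free ramp-up of this allocator's queue depth: each completed
 * I/O may bump mga_cur_max_alloc_queue_depth one step toward
 * mg_max_alloc_queue_depth via compare-and-swap, mirroring every
 * successful increment into the class-wide mca_alloc_max_slots.
 */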
static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
metaslab_class_allocator_t *mca =
&mg->mg_class->mc_allocator[allocator];
uint64_t max = mg->mg_max_alloc_queue_depth;
uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
while (cur < max) {
if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
cur, cur + 1) == cur) {
atomic_inc_64(&mca->mca_alloc_max_slots);
return;
}
cur = mga->mga_cur_max_alloc_queue_depth;
}
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator, boolean_t io_complete)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
int allocator)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
range_tree_clear(msp->ms_trim, start, size);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
msp->ms_allocating_total += size;
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_largest_allocatable(msp);
return (start);
}
/*
* Find the metaslab with the highest weight that is less than what we've
* already tried. In the common case, this means that we will examine each
* metaslab at most once. Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock. If a metaslab is
* activated by another thread, and we fail to allocate from the metaslab we
* have selected, we may not try the newly-activated metaslab, and instead
* activate another metaslab. This is not optimal, but generally does not cause
* any problems (a possible exception being if every metaslab is completely full
* except for the newly-activated metaslab which we fail to examine).
*/
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
boolean_t *was_active)
{
avl_index_t idx;
avl_tree_t *t = &mg->mg_metaslab_tree;
metaslab_t *msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
uint_t tries = 0;
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
int i;
if (!try_hard && tries > zfs_metaslab_find_max_tries) {
METASLABSTAT_BUMP(metaslabstat_too_many_tries);
return (NULL);
}
tries++;
if (!metaslab_should_allocate(msp, asize, try_hard)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
continue;
}
/*
* If the selected metaslab is condensing or disabled,
* skip it.
*/
if (msp->ms_condensing || msp->ms_disabled > 0)
continue;
*was_active = msp->ms_allocator != -1;
/*
* If we're activating as primary, this is our first allocation
* from this disk, so we don't need to check how close we are.
* If the metaslab under consideration was already active,
* we're getting desperate enough to steal another allocator's
* metaslab, so we still don't care about distances.
*/
if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
break;
for (i = 0; i < d; i++) {
if (want_unique &&
!metaslab_is_unique(msp, &dva[i]))
break; /* try another metaslab */
}
if (i == d)
break;
}
if (msp != NULL) {
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
search->ms_allocator = msp->ms_allocator;
search->ms_primary = msp->ms_primary;
}
return (msp);
}
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
return;
if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(!msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY3S(msp->ms_allocator, ==, -1);
return;
}
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
for (int i = 0; i < d; i++) {
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_CLAIM;
break;
}
}
/*
* If we don't have enough metaslabs active to fill the entire array, we
* just use the 0th slot.
*/
if (mg->mg_ms_ready < mg->mg_allocators * 3)
allocator = 0;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
/*
* At the end of the metaslab tree are the already-active metaslabs,
* first the primaries, then the secondaries. When we resume searching
* through the tree, we need to consider ms_allocator and ms_primary so
* we start in the location right after where we left off, and don't
* accidentally loop forever considering the same metaslabs.
*/
search->ms_allocator = -1;
search->ms_primary = B_TRUE;
for (;;) {
boolean_t was_active = B_FALSE;
mutex_enter(&mg->mg_lock);
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
mga->mga_primary != NULL) {
msp = mga->mga_primary;
/*
* Even though we don't hold the ms_lock for the
* primary metaslab, those fields should not
* change while we hold the mg_lock. Thus it is
* safe to make assertions on them.
*/
ASSERT(msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
mga->mga_secondary != NULL) {
msp = mga->mga_secondary;
/*
* See comment above about the similar assertions
* for the primary metaslab.
*/
ASSERT(!msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else {
msp = find_valid_metaslab(mg, activation_weight, dva, d,
want_unique, asize, allocator, try_hard, zal,
search, &was_active);
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
mutex_enter(&msp->ms_lock);
metaslab_active_mask_verify(msp);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE3(ms__activation__attempt,
metaslab_t *, msp, uint64_t, activation_weight,
boolean_t, was_active);
#endif
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
* active status first to see if we need to select a new
* metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
ASSERT3S(msp->ms_allocator, ==, -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If the metaslab was activated for another allocator
* while we were waiting in the ms_lock above, or it's
* a primary and we're seeking a secondary (or vice versa),
* we go back and select a new metaslab.
*/
if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
(msp->ms_allocator != -1) &&
(msp->ms_allocator != allocator || ((activation_weight ==
METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
ASSERT(msp->ms_loaded);
ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
msp->ms_allocator != -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* This metaslab was used for claiming regions allocated
* by the ZIL during pool import. Once these regions are
* claimed we don't need to keep the CLAIM bit set
* anymore. Passivate this metaslab to zero its activation
* mask.
*/
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
activation_weight != METASLAB_WEIGHT_CLAIM) {
ASSERT(msp->ms_loaded);
ASSERT3S(msp->ms_allocator, ==, -1);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_WEIGHT_CLAIM);
mutex_exit(&msp->ms_lock);
continue;
}
metaslab_set_selected_txg(msp, txg);
int activation_error =
metaslab_activate(msp, allocator, activation_weight);
metaslab_active_mask_verify(msp);
/*
* If the metaslab was activated by another thread for
* another allocator or activation_weight (EBUSY), or it
* failed because another metaslab was assigned as primary
* for this allocator (EEXIST) we continue using this
* metaslab for our allocation, rather than going on to a
* worse metaslab (we waited for that metaslab to be loaded
* after all).
*
* If the activation failed due to an I/O error or ENOSPC we
* skip to the next metaslab.
*/
boolean_t activated;
if (activation_error == 0) {
activated = B_TRUE;
} else if (activation_error == EBUSY ||
activation_error == EEXIST) {
activated = B_FALSE;
} else {
mutex_exit(&msp->ms_lock);
continue;
}
ASSERT(msp->ms_loaded);
/*
* Now that we have the lock, recheck to see if we should
* continue to use this metaslab for this allocation. The
* metaslab is now loaded so metaslab_should_allocate()
* can accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize, try_hard)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
goto next;
}
/*
* If this metaslab is currently condensing then pick again
* as we can't manipulate this metaslab until it's committed
* to disk. If this metaslab is being initialized, we shouldn't
* allocate from it since the allocated region might be
* overwritten after allocation.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
} else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_DISABLED, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
if (activated)
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
uint64_t, asize);
#endif
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
uint64_t weight;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
weight = metaslab_largest_allocatable(msp);
WEIGHT_SET_SPACEBASED(weight);
} else {
weight = metaslab_weight_from_range_tree(msp);
}
if (activated) {
metaslab_passivate(msp, weight);
} else {
/*
* For the case where we use the metaslab that is
* active for another allocator we want to make
* sure that we retain the activation mask.
*
* Note that we could attempt to use something like
* metaslab_recalculate_weight_and_sort() that
* retains the activation mask here. That function
* uses metaslab_weight() to set the weight though
* which is not as accurate as the calculations
* above.
*/
weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(mg, msp, weight);
}
metaslab_active_mask_verify(msp);
/*
* We have just failed an allocation attempt, check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
uint64_t offset;
ASSERT(mg->mg_initialized);
offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
dva, d, allocator, try_hard);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE, allocator);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* Allocate a block for the specified i/o.
*/
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal, int allocator)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
- metaslab_group_t *mg, *fast_mg, *rotor;
+ metaslab_group_t *mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
* This will result in more split blocks when using device removal,
* and a large number of split blocks coupled with ztest-induced
* damage can result in extremely long reconstruction times. This
* will also test spilling from special to normal.
*/
- if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) {
+ if (psize >= metaslab_force_ganging &&
+ metaslab_force_ganging_pct > 0 &&
+ (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
allocator);
return (SET_ERROR(ENOSPC));
}
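/*
 * Editor's illustration (hedged; assumes the tunable's default keeps
 * the previously hardcoded value of 3): a block with
 * psize >= metaslab_force_ganging is forced to gang when
 * random_in_range(100) < MIN(metaslab_force_ganging_pct, 100), i.e.
 * with roughly a 3% probability at the assumed default, while setting
 * metaslab_force_ganging_pct to 0 disables forced ganging entirely.
 */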
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mca_rotor or mca_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try and spread them out on
* that vdev as much as possible. If it turns out to not be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
* longer exists or its mg has been closed (e.g. by
* device removal). Consult the rotor when
* all else fails.
*/
if (vd != NULL && vd->vdev_mg != NULL) {
mg = vdev_get_mg(vd, mc);
if (flags & METASLAB_HINTBP_AVOID)
mg = mg->mg_next;
} else {
mg = mca->mca_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
- } else if (flags & METASLAB_FASTWRITE) {
- mg = fast_mg = mca->mca_rotor;
-
- do {
- if (fast_mg->mg_vd->vdev_pending_fastwrite <
- mg->mg_vd->vdev_pending_fastwrite)
- mg = fast_mg;
- } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
-
} else {
ASSERT(mca->mca_rotor != NULL);
mg = mca->mca_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mca->mca_rotor;
rotor = mg;
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
flags, psize, allocator, d);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE, allocator);
goto next;
}
ASSERT(mg->mg_initialized);
/*
* Avoid writing single-copy data to an unhealthy,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if (vd->vdev_state < VDEV_STATE_HEALTHY &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR, allocator);
goto next;
}
ASSERT(mg->mg_class == mc);
uint64_t asize = vdev_psize_to_asize(vd, psize);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
/*
* If we don't need to try hard, then require that the
* block be on a different metaslab from any other DVAs
* in this BP (unique=true). If we are trying hard, then
* allow any metaslab to be used (unique=false).
*/
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
!try_hard, dva, d, allocator, try_hard);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
- if ((flags & METASLAB_FASTWRITE) ||
+ if ((flags & METASLAB_ZIL) ||
atomic_add_64_nv(&mca->mca_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
- if (flags & METASLAB_FASTWRITE) {
- atomic_add_64(&vd->vdev_pending_fastwrite,
- psize);
- }
-
return (0);
}
next:
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, perhaps do so now.
*/
if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
psize <= 1 << spa->spa_min_ashift)) {
METASLABSTAT_BUMP(metaslabstat_try_hard);
try_hard = B_TRUE;
goto top;
}
memset(&dva[d], 0, sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
}
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
boolean_t checkpoint)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
ASSERT(vdev_is_concrete(vd));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
metaslab_check_free_impl(vd, offset, asize);
mutex_enter(&msp->ms_lock);
if (range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing)) {
vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
}
if (checkpoint) {
ASSERT(spa_has_checkpoint(spa));
range_tree_add(msp->ms_checkpointing, offset, asize);
} else {
range_tree_add(msp->ms_freeing, offset, asize);
}
mutex_exit(&msp->ms_lock);
}
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
if (vd->vdev_ops->vdev_op_remap != NULL)
vdev_indirect_mark_obsolete(vd, offset, size);
else
metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
boolean_t checkpoint)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
return;
if (spa->spa_vdev_removal != NULL &&
spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
vdev_is_concrete(vd)) {
/*
* Note: we check if the vdev is concrete because when
* we complete the removal, we first change the vdev to be
* an indirect vdev (in open context), and then (in syncing
* context) clear spa_vdev_removal.
*/
free_from_removing_vdev(vd, offset, size);
} else if (vd->vdev_ops->vdev_op_remap != NULL) {
vdev_indirect_mark_obsolete(vd, offset, size);
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
} else {
metaslab_free_concrete(vd, offset, size, checkpoint);
}
}
typedef struct remap_blkptr_cb_arg {
blkptr_t *rbca_bp;
spa_remap_cb_t rbca_cb;
vdev_t *rbca_remap_vd;
uint64_t rbca_remap_offset;
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
remap_blkptr_cb_arg_t *rbca = arg;
blkptr_t *bp = rbca->rbca_bp;
/* We cannot remap split blocks. */
if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
return;
ASSERT0(inner_offset);
if (rbca->rbca_cb != NULL) {
/*
* At this point we know that we are not handling split
* blocks and we invoke the callback on the previous
* vdev which must be indirect.
*/
ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
/* set up remap_blkptr_cb_arg for the next call */
rbca->rbca_remap_vd = vd;
rbca->rbca_remap_offset = offset;
}
/*
* The phys birth time is that of dva[0]. This ensures that we know
* when each dva was written, so that resilver can determine which
* blocks need to be scrubbed (i.e. those written during the time
* the vdev was offline). It also ensures that the key used in
* the ARC hash table is unique (i.e. dva[0] + phys_birth). If
* we didn't change the phys_birth, a lookup in the ARC for a
* remapped BP could find the data that was previously stored at
* this vdev + offset.
*/
vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
* If the block pointer contains any indirect DVAs, modify them to refer to
* concrete DVAs. Note that this will sometimes not be possible, leaving
* the indirect DVA in place. This happens if the indirect DVA spans multiple
* segments in the mapping (i.e. it is a "split block").
*
* If the BP was remapped, calls the callback on the original dva (note the
* callback can be called multiple times if the original indirect DVA refers
* to another indirect DVA, etc).
*
* Returns TRUE if the BP was remapped.
*/
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
remap_blkptr_cb_arg_t rbca;
if (!zfs_remap_blkptr_enable)
return (B_FALSE);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
return (B_FALSE);
/*
* Dedup BPs cannot be remapped, because ddt_phys_select() depends
* on DVA[0] being the same in the BP as in the DDT (dedup table).
*/
if (BP_GET_DEDUP(bp))
return (B_FALSE);
/*
* Gang blocks can not be remapped, because
* zio_checksum_gang_verifier() depends on the DVA[0] that's in
* the BP used to read the gang block header (GBH) being the same
* as the DVA[0] that we allocated for the GBH.
*/
if (BP_IS_GANG(bp))
return (B_FALSE);
/*
* Embedded BPs have no DVA to remap.
*/
if (BP_GET_NDVAS(bp) < 1)
return (B_FALSE);
/*
* Note: we only remap dva[0]. If we remapped other dvas, we
* would no longer know what their phys birth txg is.
*/
dva_t *dva = &bp->blk_dva[0];
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops->vdev_op_remap == NULL)
return (B_FALSE);
rbca.rbca_bp = bp;
rbca.rbca_cb = callback;
rbca.rbca_remap_vd = vd;
rbca.rbca_remap_offset = offset;
rbca.rbca_cb_arg = arg;
/*
* remap_blkptr_cb() will be called in order for each level of
* indirection, until a concrete vdev is reached or a split block is
* encountered. rbca_remap_vd and rbca_remap_offset are updated within
* the callback as we go from one indirect vdev to the next (either
* concrete or indirect again) in that order.
*/
vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
/* Check if the DVA wasn't remapped because it is a split block */
if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
return (B_FALSE);
return (B_TRUE);
}
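/*
 * Usage sketch (editor's illustration; the NULL-callback choice is an
 * assumption, not taken from this file): a caller that only needs
 * dva[0] rewritten to a concrete vdev can pass no callback:
 *
 *     boolean_t remapped = spa_remap_blkptr(spa, bp, NULL, NULL);
 *
 * On B_TRUE, bp->blk_dva[0] now names a concrete vdev and offset. A
 * non-NULL callback is instead invoked once per level of indirection
 * with the pre-remap vdev id and offset, as described above.
 */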
/*
* Undo the allocation of a DVA which happened in the given transaction group.
*/
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
metaslab_t *msp;
vdev_t *vd;
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total -= size;
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
range_tree_add(msp->ms_allocatable, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
* Free the block represented by the given DVA.
*/
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (DVA_GET_GANG(dva)) {
size = vdev_gang_header_asize(vd);
}
metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
zio_t *zio, int flags)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
uint64_t max = mca->mca_alloc_max_slots;
ASSERT(mc->mc_alloc_throttle_enabled);
if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
/*
* The potential race between _count() and _add() is covered
* by the allocator lock in most cases, or irrelevant due to
* GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
* But even if we assume some other non-existent scenario, the
* worst that can happen is that a few more I/Os get to allocation
* earlier, which is not a problem.
*
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
return (B_TRUE);
}
return (B_FALSE);
}
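/*
 * Illustrative pairing (editor's sketch; the surrounding control flow
 * is assumed): each successful reservation must eventually be matched
 * by an unreserve with the same slot count, allocator, and zio:
 *
 *     if (metaslab_class_throttle_reserve(mc, slots, allocator,
 *         zio, flags)) {
 *             ... issue the allocating I/O ...
 *             metaslab_class_throttle_unreserve(mc, slots,
 *                 allocator, zio);
 *     }
 */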
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
int allocator, zio_t *zio)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
ASSERT(mc->mc_alloc_throttle_enabled);
zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
}
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
uint64_t txg)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
int error = 0;
if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
return (SET_ERROR(ENXIO));
ASSERT3P(vd->vdev_ms, !=, NULL);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
if (error == EBUSY) {
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
error = 0;
}
}
if (error == 0 &&
!range_tree_contains(msp->ms_allocatable, offset, size))
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
return (error);
}
VERIFY(!msp->ms_condensing);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
range_tree_remove(msp->ms_allocatable, offset, size);
range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (!multilist_link_active(&msp->ms_class_txg_node)) {
msp->ms_selected_txg = txg;
multilist_sublist_insert_head(mls, msp);
}
multilist_sublist_unlock(mls);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total += size;
}
mutex_exit(&msp->ms_lock);
return (0);
}
typedef struct metaslab_claim_cb_arg_t {
uint64_t mcca_txg;
int mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
size, mcca_arg->mcca_txg);
}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
if (vd->vdev_ops->vdev_op_remap != NULL) {
metaslab_claim_cb_arg_t arg;
/*
* Only zdb(8) can claim on indirect vdevs. This is used
* to detect leaks of mapped space (that are not accounted
* for in the obsolete counts, spacemap, or bpobj).
*/
ASSERT(!spa_writeable(vd->vdev_spa));
arg.mcca_error = 0;
arg.mcca_txg = txg;
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_claim_impl_cb, &arg);
if (arg.mcca_error == 0) {
arg.mcca_error = metaslab_claim_concrete(vd,
offset, size, txg);
}
return (arg.mcca_error);
} else {
return (metaslab_claim_concrete(vd, offset, size, txg));
}
}
/*
* Intent log support: upon opening the pool after a crash, notify the SPA
* of blocks that the intent log has allocated for immediate write, but
* which are still considered free by the SPA because the last transaction
* group didn't commit yet.
*/
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
return (SET_ERROR(ENXIO));
}
ASSERT(DVA_IS_VALID(dva));
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
int error = 0;
ASSERT(bp->blk_birth == 0);
ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_allocator[allocator].mca_rotor == NULL) {
/* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal, allocator);
if (error != 0) {
for (d--; d >= 0; d--) {
metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
memset(&dva[d], 0, sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
/*
* If we have a checkpoint for the pool we need to make sure that
* the blocks that we free that are part of the checkpoint won't be
* reused until the checkpoint is discarded or we revert to it.
*
* The checkpoint flag is passed down the metaslab_free code path
* and is set whenever we want to add a block to the checkpoint's
* accounting. That is, we "checkpoint" blocks that existed at the
* time the checkpoint was created and are therefore referenced by
* the checkpointed uberblock.
*
* Note that we don't checkpoint any blocks if the current
* syncing txg <= spa_checkpoint_txg. We want these frees to sync
* normally as they will be referenced by the checkpointed uberblock.
*/
boolean_t checkpoint = B_FALSE;
if (bp->blk_birth <= spa->spa_checkpoint_txg &&
spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
/*
* At this point, if the block is part of the checkpoint
* there is no way it was created in the current txg.
*/
ASSERT(!now);
ASSERT3U(spa_syncing_txg(spa), ==, txg);
checkpoint = B_TRUE;
}
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
if (now) {
metaslab_unalloc_dva(spa, &dva[d], txg);
} else {
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_free_dva(spa, &dva[d], checkpoint);
}
}
spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
error = metaslab_claim_dva(spa, &dva[d], txg);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
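/*
 * Editor's note on the dry-run convention above (sketch; the exact txg
 * used by intent log replay is an assumption): claiming with txg == 0
 * validates every DVA without modifying any range trees, so the real
 * claim cannot fail partway through. Replay of a log block therefore
 * reduces to something like:
 *
 *     error = metaslab_claim(spa, bp, first_txg);
 *
 * where first_txg is the pool's first post-import txg.
 */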
-void
-metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
-{
- const dva_t *dva = bp->blk_dva;
- int ndvas = BP_GET_NDVAS(bp);
- uint64_t psize = BP_GET_PSIZE(bp);
- int d;
- vdev_t *vd;
-
- ASSERT(!BP_IS_HOLE(bp));
- ASSERT(!BP_IS_EMBEDDED(bp));
- ASSERT(psize > 0);
-
- spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
-
- for (d = 0; d < ndvas; d++) {
- if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
- continue;
- atomic_add_64(&vd->vdev_pending_fastwrite, psize);
- }
-
- spa_config_exit(spa, SCL_VDEV, FTAG);
-}
-
-void
-metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
-{
- const dva_t *dva = bp->blk_dva;
- int ndvas = BP_GET_NDVAS(bp);
- uint64_t psize = BP_GET_PSIZE(bp);
- int d;
- vdev_t *vd;
-
- ASSERT(!BP_IS_HOLE(bp));
- ASSERT(!BP_IS_EMBEDDED(bp));
- ASSERT(psize > 0);
-
- spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
-
- for (d = 0; d < ndvas; d++) {
- if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
- continue;
- ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
- atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
- }
-
- spa_config_exit(spa, SCL_VDEV, FTAG);
-}
-
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner, (void) arg;
if (vd->vdev_ops == &vdev_indirect_ops)
return;
metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
metaslab_t *msp;
spa_t *spa __maybe_unused = vd->vdev_spa;
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
if (vd->vdev_ops->vdev_op_remap != NULL) {
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_check_free_impl_cb, NULL);
return;
}
ASSERT(vdev_is_concrete(vd));
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if (msp->ms_loaded) {
range_tree_verify_not_present(msp->ms_allocatable,
offset, size);
}
/*
* Check all segments that currently exist in the freeing pipeline.
*
* It would intuitively make sense to also check the current allocating
* tree since metaslab_unalloc_dva() exists for extents that are
* allocated and freed in the same sync pass within the same txg.
* Unfortunately there are places (e.g. the ZIL) where we allocate a
* segment but then we free part of it within the same txg
* [see zil_sync()]. Thus, we don't call range_tree_verify() in the
* current allocating tree.
*/
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
range_tree_verify_not_present(msp->ms_defer[j], offset, size);
range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (DVA_GET_GANG(&bp->blk_dva[i]))
size = vdev_gang_header_asize(vd);
ASSERT3P(vd, !=, NULL);
metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
while (mg->mg_disabled_updating) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
ASSERT(mg->mg_disabled_updating);
while (mg->mg_ms_disabled >= max_disabled_ms) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
mg->mg_ms_disabled++;
ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
* Mark the metaslab as disabled to prevent any allocations on this metaslab.
* We must also track how many metaslabs are currently disabled within a
* metaslab group and limit them to prevent allocation failures from
* occurring because all metaslabs are disabled.
*/
void
metaslab_disable(metaslab_t *msp)
{
ASSERT(!MUTEX_HELD(&msp->ms_lock));
metaslab_group_t *mg = msp->ms_group;
mutex_enter(&mg->mg_ms_disabled_lock);
/*
* To keep an accurate count of how many threads have disabled
* a specific metaslab group, we only allow one thread to mark
* the metaslab group at a time. This ensures that the value of
* mg_ms_disabled will be accurate when we decide to mark a metaslab
* group as disabled. To do this we force all other threads
* to wait until the metaslab group's mg_disabled_updating flag is no
* longer set.
*/
metaslab_group_disable_wait(mg);
mg->mg_disabled_updating = B_TRUE;
if (msp->ms_disabled == 0) {
metaslab_group_disabled_increment(mg);
}
mutex_enter(&msp->ms_lock);
msp->ms_disabled++;
mutex_exit(&msp->ms_lock);
mg->mg_disabled_updating = B_FALSE;
cv_broadcast(&mg->mg_ms_disabled_cv);
mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
/*
* Wait for the outstanding IO to be synced to prevent newly
* allocated blocks from being overwritten. This is used by
* initialize and TRIM, which modify unallocated space.
*/
if (sync)
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&mg->mg_ms_disabled_lock);
mutex_enter(&msp->ms_lock);
if (--msp->ms_disabled == 0) {
mg->mg_ms_disabled--;
cv_broadcast(&mg->mg_ms_disabled_cv);
if (unload)
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&mg->mg_ms_disabled_lock);
}
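/*
 * Illustrative pairing (editor's sketch; the sync and unload argument
 * values are assumptions): operations such as TRIM and initialize,
 * which modify unallocated space, bracket their work like so:
 *
 *     metaslab_disable(msp);
 *     ... operate on currently-free space in msp ...
 *     metaslab_enable(msp, B_TRUE, B_FALSE);
 *
 * Passing sync == B_TRUE waits for outstanding I/O to sync before
 * allocations are re-enabled.
 */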
void
metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
{
ms->ms_unflushed_dirty = dirty;
}
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
vdev_t *vd = ms->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
metaslab_unflushed_phys_t entry = {
.msp_unflushed_txg = metaslab_unflushed_txg(ms),
};
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object);
if (err == ENOENT) {
object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object, tx));
} else {
VERIFY0(err);
}
dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
&entry, tx);
}
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
ms->ms_unflushed_txg = txg;
metaslab_update_ondisk_flush_data(ms, tx);
}
boolean_t
metaslab_unflushed_dirty(metaslab_t *ms)
{
return (ms->ms_unflushed_dirty);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
"Allocation granularity (a.k.a. stripe size)");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
"Load all metaslabs when pool is first opened");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
"Prevent metaslabs from being unloaded");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
"Preload potential metaslabs during reassessment");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
"Delay in txgs after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
"Delay in milliseconds after metaslab was last used before unloading");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be free to make it "
"eligible for allocation");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be considered eligible "
"for allocations unless all metaslab groups within the metaslab class "
"have also crossed this threshold");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
ZMOD_RW,
"Use the fragmentation metric to prefer less fragmented metaslabs");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
"Prefer metaslabs with lower LBAs");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
"Enable metaslab group biasing");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
ZMOD_RW, "Enable segment-based metaslab selection");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
"Segment-based metaslab selection maximum buckets before switching");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
- "Blocks larger than this size are forced to be gang blocks");
+ "Blocks larger than this size are sometimes forced to be gang blocks");
+
+ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
+ "Percentage of large blocks that will be forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
"Max distance (bytes) to search forward before using size tree");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
"When looking in size tree, use largest segment instead of exact fit");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
"Percentage of memory that can be used to store metaslab range trees");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
ZMOD_RW, "Try hard to allocate before ganging");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
"Normally only consider this many of the best metaslabs in each vdev");
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 06f640769043..3b355e0debcc 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -1,3004 +1,3005 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>
/*
* SPA locking
*
* There are three basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from non-zero
* - Check if spa_refcount is zero
* - Rename a spa_t
* - add/remove/attach/detach devices
* - Held for the duration of create/destroy/import/export
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to look up a spa_t by name.
*
* spa_refcount (per-spa zfs_refcount_t protected by mutex)
*
* This reference count keeps track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
* the refcount is never really 'zero' - opening a pool implicitly keeps
* some references in the DMU. Internally we check against spa_minref, but
* present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
* spa_guid_exists() Determine whether a pool/device guid exists.
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
* To read the configuration, it suffices to hold one of these locks as reader.
* To modify the configuration, you must hold all locks as writer. To modify
* vdev state without altering the vdev tree's topology (e.g. online/offline),
* you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
* SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev
* add/remove/attach/detach. Protects the dirty config list
* (spa_config_dirty_list) and the set of spares and l2arc devices.
*
* SCL_STATE
* Protects changes to pool state and vdev state, such as vdev
* online/offline/fault/degrade/clear. Protects the dirty state list
* (spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
* or zio_write_phys() -- the caller must ensure that the config cannot
* change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*/
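/*
 * Illustrative example (editor's sketch, not part of the original
 * comment): a read-side consumer of the vdev tree takes the weakest
 * sufficient lock as reader and releases it with the same tag:
 *
 *     spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *     vdev_t *vd = vdev_lookup_top(spa, vdev_id);
 *     ... trivial inquiries on vd ...
 *     spa_config_exit(spa, SCL_VDEV, FTAG);
 */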
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
* Everything except dprintf, set_error, spa, and indirect_remap is on
* by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
*/
int zfs_recover = B_FALSE;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
* The default, "stalling" behavior is useful if the storage partially
* fails (i.e. some but not all i/os fail), and then later recovers. In
* this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
*/
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First, it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
* Second, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in one of three behaviors controlled by zfs_deadman_failmode.
*/
uint64_t zfs_deadman_synctime_ms = 600000UL; /* 10 min. */
/*
* This value controls the maximum amount of time zio_wait() will block for an
* outstanding IO. By default this is 300 seconds at which point the "hung"
* behavior will be applied as described for zfs_deadman_synctime_ms.
*/
uint64_t zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
*/
uint64_t zfs_deadman_checktime_ms = 60000UL; /* 1 min. */
/*
* By default the deadman is enabled.
*/
int zfs_deadman_enabled = B_TRUE;
/*
* Controls the behavior of the deadman when it detects a "hung" I/O.
* Valid values are zfs_deadman_failmode=<wait|continue|panic>.
*
* wait - Wait for the "hung" I/O (default)
* continue - Attempt to recover from a "hung" I/O
* panic - Panic the system
*/
const char *zfs_deadman_failmode = "wait";
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
uint_t spa_asize_inflation = 24;
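/*
 * Editor's note on the arithmetic above: VDEV_RAIDZ_MAXPARITY is 3 and
 * SPA_DVAS_PER_BP is 3, so the worst case works out to
 * (3 + 1) * 3 * 2 == 24.
 */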
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed (bounded by spa_max_slop). This ensures that we
* don't run the pool completely out of space, due to unaccounted changes (e.g.
* to the MOS). It also limits the worst-case time to allocate space. If we
* have less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
* also part of this 3.2% of space which can't be consumed by normal writes;
* the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
* log space.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* Operations that are almost guaranteed to free up space in the absence of
* a pool checkpoint can use up to three quarters of the slop space
* (e.g zfs destroy).
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
* increase in the amount of space used, it is possible to run the pool
* completely out of space, causing it to be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* Further, on very large pools, the slop space will be smaller than
* 3.2%, to avoid reserving much more space than we actually need; bounded
* by spa_max_slop (128GB).
*
* See also the comments in zfs_space_check_t.
*/
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
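/*
 * Worked example (editor's illustration): with spa_slop_shift = 5 the
 * nominal slop is pool_size / 2^5, i.e. 3.125%. A 1 TiB pool reserves
 * 32 GiB; a 1 GiB pool would nominally reserve 32 MiB but is raised to
 * spa_min_slop (128 MiB); a sufficiently large pool is capped at
 * spa_max_slop (128 GiB).
 */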
static const int spa_allocators = 4;
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
/*
* By default dedup and user data indirects land in the special class
*/
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;
/*
* The percentage of special class final space reserved for metadata only.
* Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
* let metadata into the class.
*/
static uint_t zfs_special_class_metadata_reserve_pct = 25;
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
scl->scl_count = 0;
}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
ASSERT(scl->scl_count == 0);
}
}
int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (scl->scl_count != 0) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
return (1);
}
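/*
 * Editor's sketch of the tryenter convention above: a non-blocking
 * attempt returns 1 with the requested locks held, or 0 with nothing
 * held (any locks acquired so far are dropped on failure):
 *
 *     if (spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER)) {
 *         ... fast path ...
 *         spa_config_exit(spa, SCL_ZIO, FTAG);
 *     }
 */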
static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
int mmp_flag)
{
(void) tag;
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer ||
(!mmp_flag && scl->scl_write_wanted)) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
ASSERT(scl->scl_writer != curthread);
while (scl->scl_count != 0) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 0);
}
/*
* spa_config_enter_mmp() allows the mmp thread to cut in front of
* outstanding write lock requests. This is needed since the mmp updates are
* time-sensitive and failure to service them promptly will result in a
* suspended pool. This pool suspension has been seen in practice when there is
* a single disk in a pool that is responding slowly and presumably about to
* fail.
*/
void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 1);
}
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
(void) tag;
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(scl->scl_count > 0);
if (--scl->scl_count == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
cv_broadcast(&scl->scl_cv);
}
mutex_exit(&scl->scl_lock);
}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
int locks_held = 0;
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && scl->scl_count != 0) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
return (locks_held);
}
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
cp = strpbrk(search.spa_name, "/@#");
if (cp != NULL)
*cp = '\0';
spa = avl_find(&spa_namespace_avl, &search, &where);
return (spa);
}
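/*
 * Editor's illustration of the name handling above: a full dataset name
 * is truncated at the first '/', '@', or '#', so
 * spa_lookup("tank/fs@snap") returns the spa_t for pool "tank".
 */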
/*
* Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
* If the zfs_deadman_enabled flag is set then it inspects all vdev queues
* looking for potentially hung I/Os.
*/
void
spa_deadman(void *arg)
{
spa_t *spa = arg;
/* Disable the deadman if the pool is suspended. */
if (spa_suspended(spa))
return;
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
(u_longlong_t)++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
const spa_log_sm_t *a = va;
const spa_log_sm_t *b = vb;
return (TREE_CMP(a->sls_txg, b->sls_txg));
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_state = POOL_STATE_UNINITIALIZED;
spa->spa_freeze_txg = UINT64_MAX;
spa->spa_final_txg = UINT64_MAX;
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
spa->spa_trust_config = B_TRUE;
spa->spa_hostid = zone_get_hostid(NULL);
spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
if (altroot)
spa->spa_root = spa_strdup(altroot);
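/*
* Set up the per-spa allocators; each has its own lock and AVL tree
* of queued ZIOs to reduce allocation lock contention.
*/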
spa->spa_alloc_count = spa_allocators;
spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
sizeof (spa_alloc_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
NULL);
avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
}
avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
offsetof(log_summary_entry_t, lse_node));
/*
* Every pool starts with the default cachefile.
*/
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
VERIFY(nvlist_dup(features, &spa->spa_label_features,
0) == 0);
}
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
}
if (spa->spa_label_features == NULL) {
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
}
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
+ spa->spa_gcd_alloc = INT_MAX;
/* Reset cached value */
spa->spa_dedup_dspace = ~0ULL;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
for (int i = 0; i < SPA_FEATURES; i++) {
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
list_create(&spa->spa_leaf_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_leaf_node));
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
if (spa->spa_root)
spa_strfree(spa->spa_root);
while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path != NULL)
spa_strfree(dp->scd_path);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
avl_destroy(&spa->spa_allocs[i].spaa_tree);
mutex_destroy(&spa->spa_allocs[i].spaa_lock);
}
kmem_free(spa->spa_allocs, spa->spa_alloc_count *
sizeof (spa_alloc_t));
avl_destroy(&spa->spa_metaslabs_by_flushed);
avl_destroy(&spa->spa_sm_logs_by_txg);
list_destroy(&spa->spa_log_summary);
list_destroy(&spa->spa_config_list);
list_destroy(&spa->spa_leaf_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
zfs_refcount_destroy(&spa->spa_refcount);
spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
zio_checksum_templates_free(spa);
cv_destroy(&spa->spa_async_cv);
cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
cv_destroy(&spa->spa_activities_cv);
cv_destroy(&spa->spa_waiters_cv);
mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
mutex_destroy(&spa->spa_activities_lock);
kmem_free(spa, sizeof (spa_t));
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
else
return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, const void *tag)
{
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
* number of references acquired when opening a pool.
*/
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
uint64_t aux_guid; /* guid of the aux vdev */
uint64_t aux_pool; /* guid of the pool it's active in, or 0 */
avl_node_t aux_avl; /* node in the global aux AVL tree */
int aux_count; /* number of pools referencing this device */
} spa_aux_t;
static inline int
spa_aux_compare(const void *a, const void *b)
{
const spa_aux_t *sa = (const spa_aux_t *)a;
const spa_aux_t *sb = (const spa_aux_t *)b;
return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
spa_aux_t search;
spa_aux_t *aux;
search.aux_guid = vd->vdev_guid;
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
}
}
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
spa_aux_t *aux;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
aux = avl_find(avl, &search, &where);
ASSERT(aux != NULL);
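/*
* Free the node when the last reference goes away; otherwise, if
* this pool was the one actively using the device, clear the
* active-pool marker.
*/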
if (--aux->aux_count == 0) {
avl_remove(avl, aux);
kmem_free(aux, sizeof (spa_aux_t));
} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
aux->aux_pool = 0ULL;
}
}
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
search.aux_guid = guid;
found = avl_find(avl, &search, NULL);
if (pool) {
if (found)
*pool = found->aux_pool;
else
*pool = 0ULL;
}
if (refcnt) {
if (found)
*refcnt = found->aux_count;
else
*refcnt = 0;
}
return (found != NULL);
}
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
found = avl_find(avl, &search, &where);
ASSERT(found != NULL);
ASSERT(found->aux_pool == 0ULL);
found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
* pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(!vd->vdev_isspare);
spa_aux_add(vd, &spa_spare_avl);
vd->vdev_isspare = B_TRUE;
mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_remove(vd, &spa_spare_avl);
vd->vdev_isspare = B_FALSE;
mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
boolean_t found;
mutex_enter(&spa_spare_lock);
found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_activate(vd, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
}
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
* Cache devices currently support only one pool per cache device, so for
* these devices the aux reference count is not used beyond 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(!vd->vdev_isl2cache);
spa_aux_add(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_TRUE;
mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_remove(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_FALSE;
mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
boolean_t found;
mutex_enter(&spa_l2cache_lock);
found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_activate(vd, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
return (spa_vdev_config_enter(spa));
}
/*
* The same as spa_vdev_enter() above but additionally takes the guid of
* the vdev being detached. When there is a rebuild in progress it will be
* suspended while the vdev tree is modified, then resumed by spa_vdev_exit().
* The rebuild is canceled if only a single child remains after the detach.
*/
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
if (guid != 0) {
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd) {
vdev_rebuild_stop_wait(vd->vdev_top);
}
}
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
* operation requires multiple syncs (e.g. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
const char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
int config_changed = B_FALSE;
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
/*
* Reassess the DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
}
/*
* Verify the metaslab classes.
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
/*
* Panic the system if the specified tag requires it. This
* is useful for ensuring that configurations are updated
* transactionally.
*/
if (zio_injection_enabled)
zio_handle_panic_injection(spa, tag, 0);
/*
* Note: this txg_wait_synced() is important because it ensures
* that there won't be more than one config change per txg.
* This allows us to use the txg as the generation number.
*/
if (error == 0)
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
if (vd->vdev_ops->vdev_op_leaf) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
NULL);
mutex_exit(&vd->vdev_initialize_lock);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
}
/*
* The vdev may be both a leaf and top-level device.
*/
vdev_autotrim_stop_wait(vd);
spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_STATE_ALL, spa);
}
/*
* If the config changed, update the config cache.
*/
if (config_changed)
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
* locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
vdev_autotrim_restart(spa);
vdev_rebuild_restart(spa);
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
int locks = SCL_STATE_ALL | oplocks;
/*
* Root pools may need to read from the underlying devfs filesystem
* when opening up a vdev. Unfortunately if we're holding the
* SCL_ZIO lock it will result in a deadlock when we try to issue
* the read from the root filesystem. Instead we "prefetch"
* the associated vnodes that we need prior to opening the
* underlying devices and cache them so that we can prevent
* any I/O when we are doing the actual open.
*/
if (spa_is_root(spa)) {
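/*
* Split the requested locks around SCL_ZIO: take the locks
* ordered before SCL_ZIO ("high") first, hold the root vdev's
* devices to cache the needed vnodes, then take SCL_ZIO and
* everything after it ("low").
*/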
int low = locks & ~(SCL_ZIO - 1);
int high = locks & ~low;
spa_config_enter(spa, high, spa, RW_WRITER);
vdev_hold(spa->spa_root_vdev);
spa_config_enter(spa, low, spa, RW_WRITER);
} else {
spa_config_enter(spa, locks, spa, RW_WRITER);
}
spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
vdev_t *vdev_top;
if (vd == NULL || vd == spa->spa_root_vdev) {
vdev_top = spa->spa_root_vdev;
} else {
vdev_top = vd->vdev_top;
}
if (vd != NULL || error == 0)
vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
if (vd != NULL) {
if (vd != spa->spa_root_vdev)
vdev_state_dirty(vdev_top);
config_changed = B_TRUE;
spa->spa_config_generation++;
}
if (spa_is_root(spa))
vdev_rele(spa->spa_root_vdev);
ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
spa_config_exit(spa, spa->spa_vdev_locks, spa);
/*
* If anything changed, wait for it to sync. This ensures that,
* from the system administrator's perspective, zpool(8) commands
* are synchronous. This is important for things like zpool offline:
* when the command completes, you expect no further I/O from ZFS.
*/
if (vd != NULL)
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
mutex_exit(&spa_namespace_lock);
}
return (error);
}
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
if (!nvlist_exists(spa->spa_label_features, feature)) {
fnvlist_add_boolean(spa->spa_label_features, feature);
/*
* When we are creating the pool (tx_txg==TXG_INITIAL), we can't
* dirty the vdev config because lock SCL_CONFIG is not held.
* Thankfully, in this case we don't need to dirty the config
* because it will be written out anyway when we finish
* creating the pool.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
continue;
if (spa->spa_root_vdev == NULL)
continue;
if (spa_guid(spa) == pool_guid) {
if (device_guid == 0)
break;
if (vdev_lookup_by_guid(spa->spa_root_vdev,
device_guid) != NULL)
break;
/*
* Check any devices we may be in the process of adding.
*/
if (spa->spa_pending_vdev) {
if (vdev_lookup_by_guid(spa->spa_pending_vdev,
device_guid) != NULL)
break;
}
}
}
return (spa);
}
/*
* Determine whether a pool with the given pool_guid exists.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
size_t len;
char *new;
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
memcpy(new, s, len + 1);
return (new);
}
void
spa_strfree(char *s)
{
kmem_free(s, strlen(s) + 1);
}
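/*
* Generate a non-zero guid: unique among this pool's device guids when
* a spa is given, otherwise unique among all pool guids.
*/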
uint64_t
spa_generate_guid(spa_t *spa)
{
uint64_t guid;
if (spa != NULL) {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
} else {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(guid, 0));
}
return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
const char *checksum = NULL;
const char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
(void) snprintf(type, sizeof (type), "bswap %s %s",
DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
"metadata" : "data",
dmu_ot_byteswap[bswap].ob_name);
} else {
(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
sizeof (type));
}
if (!BP_IS_EMBEDDED(bp)) {
checksum =
zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
}
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
compress);
}
void
spa_freeze(spa_t *spa)
{
uint64_t freeze_txg = 0;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (spa->spa_freeze_txg == UINT64_MAX) {
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
spa->spa_freeze_txg = freeze_txg;
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (freeze_txg != 0)
txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
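* For example, "1a2b" parses to 0x1a2b (6699); parsing stops at the first
* non-hexadecimal character.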
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
int digit;
while ((c = *str) != '\0') {
if (c >= '0' && c <= '9')
digit = c - '0';
else if (c >= 'a' && c <= 'f')
digit = 10 + c - 'a';
else
break;
val *= 16;
val += digit;
str++;
}
if (nptr)
*nptr = (char *)str;
return (val);
}
void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
/*
* We bump the feature refcount for each special vdev added to the pool.
*/
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
return (spa->spa_is_initializing);
}
boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
return (spa->spa_indirect_vdevs_loaded);
}
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
if (spa->spa_root == NULL)
buf[0] = '\0';
else
(void) strlcpy(buf, spa->spa_root, buflen);
}
uint32_t
spa_sync_pass(spa_t *spa)
{
return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t guid;
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
* vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
if (spa->spa_root_vdev == NULL)
return (spa->spa_config_guid);
guid = spa->spa_last_synced_guid != 0 ?
spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
/*
* Return the most recently synced out guid unless we're
* in syncing context.
*/
if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
/*
* This is a GUID that exists solely as a reference for the
* purposes of the arc. It is generated at load time, and
* is never written to persistent storage.
*/
return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
return (spa->spa_syncing_txg);
}
/*
* Return the last txg where data can be dirtied. The final txgs
* are used only to clear out any deferred frees that remain.
*/
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
return (spa->spa_final_txg - TXG_DEFER_SIZE);
}
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
/*
* Return the inflated asize for a logical write in bytes. This is used by the
* DMU to calculate the space a logical write will require on disk.
* If lsize is smaller than the largest physical block size allocatable on this
* pool we use its value instead, since the write will end up using the whole
* block anyway.
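*
* For example, assuming the default spa_asize_inflation of 24 and a pool
* whose largest ashift is 12 (4 KiB), a 512-byte logical write is charged
* MAX(512, 4096) * 24 = 96 KiB of worst-case asize.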
*/
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
if (lsize == 0)
return (0); /* No inflation needed */
return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
* the value of spa_max_slop. The embedded log space is not included in
* spa_dspace. By subtracting it, the usable space (per "zfs list") is a
* constant 97% of the total space, regardless of metaslab size (assuming the
* default spa_slop_shift=5 and a non-tiny pool).
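*
* For example, with the default spa_slop_shift of 5 and a spa_max_slop of
* 128 GiB, a 1 TiB pool reserves roughly 32 GiB of slop, while pools of
* 4 TiB and larger are capped at 128 GiB.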
*
* See the comment above spa_slop_shift for more details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = 0;
uint64_t slop = 0;
/*
* Make sure spa_dedup_dspace has been set.
*/
if (spa->spa_dedup_dspace == ~0ULL)
spa_update_dspace(spa);
/*
* spa_get_dspace() includes the space only logically "used" by
* deduplicated data, so since it's not useful to reserve more
* space with more deduplicated data, we subtract that out here.
*/
space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
slop = MIN(space >> spa_slop_shift, spa_max_slop);
/*
* Subtract the embedded log space, but no more than half the (3.2%)
* unusable space. Note, the "no more than half" is only relevant if
* zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
* default.
*/
uint64_t embedded_log =
metaslab_class_get_dspace(spa_embedded_log_class(spa));
slop -= MIN(embedded_log, slop >> 1);
/*
* Slop space should be at least spa_min_slop, but no more than half
* the entire pool.
*/
slop = MAX(slop, MIN(space >> 1, spa_min_slop));
return (slop);
}
uint64_t
spa_get_dspace(spa_t *spa)
{
return (spa->spa_dspace);
}
uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
return (spa->spa_checkpoint_info.sci_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa) + brt_get_dspace(spa);
if (spa->spa_nonallocating_dspace > 0) {
/*
* Subtract the space provided by all non-allocating vdevs that
* contribute to dspace. If a file is overwritten, its old
* blocks are freed and new blocks are allocated. If there are
* no snapshots of the file, the available space should remain
* the same. The old blocks could be freed from the
* non-allocating vdev, but the new blocks must be allocated on
* other (allocating) vdevs. By reserving the entire size of
* the non-allocating vdevs (including allocated space), we
* ensure that there will be enough space on the allocating
* vdevs for this file overwrite to succeed.
*
* Note that the DMU/DSL doesn't actually know or care
* how much space is allocated (it does its own tracking
* of how much space has been logically used). So it
* doesn't matter that the data we are moving may be
* allocated twice (on the old device and the new device).
*/
ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
spa->spa_dspace -= spa->spa_nonallocating_dspace;
}
}
/*
* Return the failure mode that has been set for this pool. The default
* behavior is to block all I/Os when a complete failure occurs.
*/
uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
spa_version(spa_t *spa)
{
return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
return (spa->spa_log_class);
}
metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
return (spa->spa_embedded_log_class);
}
metaslab_class_t *
spa_special_class(spa_t *spa)
{
return (spa->spa_special_class);
}
metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
return (spa->spa_dedup_class);
}
/*
* Locate an appropriate allocation class for a block, based on its type,
* level, and size.
*/
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
uint_t level, uint_t special_smallblk)
{
/*
* ZIL allocations determine their class in zio_alloc_zil().
*/
ASSERT(objtype != DMU_OT_INTENT_LOG);
boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
if (DMU_OT_IS_DDT(objtype)) {
if (spa->spa_dedup_class->mc_groups != 0)
return (spa_dedup_class(spa));
else if (has_special_class && zfs_ddt_data_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/* Indirect blocks for user data can land in special if allowed */
if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
if (has_special_class && zfs_user_indirect_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
if (DMU_OT_IS_METADATA(objtype) || level > 0) {
if (has_special_class)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/*
* Allow small file blocks in special class in some cases (like
* for the dRAID vdev feature). But always leave a reserve of
* zfs_special_class_metadata_reserve_pct exclusively for metadata.
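* For example, with the default reserve of 25%, small file blocks are
* admitted only while the special class is less than 75% allocated.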
*/
if (DMU_OT_IS_FILE(objtype) &&
has_special_class && size <= special_smallblk) {
metaslab_class_t *special = spa_special_class(spa);
uint64_t alloc = metaslab_class_get_alloc(special);
uint64_t space = metaslab_class_get_space(special);
uint64_t limit =
(space * (100 - zfs_special_class_metadata_reserve_pct))
/ 100;
if (alloc < limit)
return (special);
}
return (spa_normal_class(spa));
}
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_insert_head(&spa->spa_evicting_os_list, os);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_remove(&spa->spa_evicting_os_list, os);
cv_broadcast(&spa->spa_evicting_os_cv);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_wait(spa_t *spa)
{
mutex_enter(&spa->spa_evicting_os_lock);
while (!list_is_empty(&spa->spa_evicting_os_list))
cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
mutex_exit(&spa->spa_evicting_os_lock);
dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
/*
* As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
* handle BPs with more than one DVA allocated. Set our max
* replication level accordingly.
*/
if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
return (1);
return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
return (spa->spa_deadman_synctime);
}
spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
return (spa->spa_autotrim);
}
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
return (spa->spa_deadman_ziotime);
}
uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
return (spa->spa_deadman_failmode);
}
void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
if (strcmp(failmode, "wait") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
else if (strcmp(failmode, "continue") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
else if (strcmp(failmode, "panic") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
else
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
void
spa_set_deadman_ziotime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_ziotime = ns;
mutex_exit(&spa_namespace_lock);
}
}
void
spa_set_deadman_synctime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_synctime = ns;
mutex_exit(&spa_namespace_lock);
}
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
uint64_t asize = DVA_GET_ASIZE(dva);
uint64_t dsize = asize;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
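/*
* With deflate enabled, scale the allocated size by the vdev's
* deflate ratio, applied per 512-byte (SPA_MINBLOCKSHIFT) unit.
*/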
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd != NULL)
dsize = (asize >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
}
return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
uint64_t
spa_dirty_data(spa_t *spa)
{
return (spa->spa_dsl_pool->dp_dirty_total);
}
/*
* ==========================================================================
* SPA Import Progress Routines
* ==========================================================================
*/
typedef struct spa_import_progress {
uint64_t pool_guid; /* unique id for updates */
char *pool_name; /* name of the pool being imported */
spa_load_state_t spa_load_state; /* current load state */
uint64_t mmp_sec_remaining; /* MMP activity check */
uint64_t spa_load_max_txg; /* rewind txg */
procfs_list_node_t smh_node; /* node in the procfs list */
} spa_import_progress_t;
spa_history_list_t *spa_import_progress_list = NULL;
static int
spa_import_progress_show_header(struct seq_file *f)
{
seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
"load_state", "multihost_secs", "max_txg",
"pool_name");
return (0);
}
static int
spa_import_progress_show(struct seq_file *f, void *data)
{
spa_import_progress_t *sip = (spa_import_progress_t *)data;
seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
(u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
(u_longlong_t)sip->mmp_sec_remaining,
(u_longlong_t)sip->spa_load_max_txg,
(sip->pool_name ? sip->pool_name : "-"));
return (0);
}
/* Remove the oldest elements from the list until at most 'size' remain. */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
spa_import_progress_t *sip;
while (shl->size > size) {
sip = list_remove_head(&shl->procfs_list.pl_list);
if (sip->pool_name)
spa_strfree(sip->pool_name);
kmem_free(sip, sizeof (spa_import_progress_t));
shl->size--;
}
IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}
static void
spa_import_progress_init(void)
{
spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
KM_SLEEP);
spa_import_progress_list->size = 0;
spa_import_progress_list->procfs_list.pl_private =
spa_import_progress_list;
procfs_list_install("zfs",
NULL,
"import_progress",
0644,
&spa_import_progress_list->procfs_list,
spa_import_progress_show,
spa_import_progress_show_header,
NULL,
offsetof(spa_import_progress_t, smh_node));
}
static void
spa_import_progress_destroy(void)
{
spa_history_list_t *shl = spa_import_progress_list;
procfs_list_uninstall(&shl->procfs_list);
spa_import_progress_truncate(shl, 0);
procfs_list_destroy(&shl->procfs_list);
kmem_free(shl, sizeof (spa_history_list_t));
}
int
spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t load_state)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_state = load_state;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_max_txg = load_max_txg;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->mmp_sec_remaining = mmp_sec_remaining;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
/*
* A new import is in progress, add an entry.
*/
void
spa_import_progress_add(spa_t *spa)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
const char *poolname = NULL;
sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
sip->pool_guid = spa_guid(spa);
(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
&poolname);
if (poolname == NULL)
poolname = spa_name(spa);
sip->pool_name = spa_strdup(poolname);
sip->spa_load_state = spa_load_state(spa);
mutex_enter(&shl->procfs_list.pl_lock);
procfs_list_add(&shl->procfs_list, sip);
shl->size++;
mutex_exit(&shl->procfs_list.pl_lock);
}
void
spa_import_progress_remove(uint64_t pool_guid)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
if (sip->pool_name)
spa_strfree(sip->pool_name);
list_remove(&shl->procfs_list.pl_list, sip);
shl->size--;
kmem_free(sip, sizeof (spa_import_progress_t));
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
}
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
const spa_t *s1 = a1;
const spa_t *s2 = a2;
int s;
s = strcmp(s1->spa_name, s2->spa_name);
return (TREE_ISIGN(s));
}
void
spa_boot_init(void)
{
spa_config_load();
}
void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
offsetof(spa_t, spa_avl));
avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
spa_mode_global = mode;
#ifndef _KERNEL
if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
struct sigaction sa;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sa.sa_sigaction = arc_buf_sigsegv;
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
perror("could not enable watchpoints: "
"sigaction(SIGSEGV, ...) = ");
} else {
arc_watch = B_TRUE;
}
}
#endif
fm_init();
zfs_refcount_init();
unique_init();
zfs_btree_init();
metaslab_stat_init();
brt_init();
ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_mirror_stat_init();
vdev_raidz_math_init();
vdev_file_init();
zfs_prop_init();
chksum_init();
zpool_prop_init();
zpool_feature_init();
spa_config_load();
vdev_prop_init();
l2arc_start();
scan_init();
qat_init();
spa_import_progress_init();
}
void
spa_fini(void)
{
l2arc_stop();
spa_evict_all();
vdev_file_fini();
vdev_mirror_stat_fini();
vdev_raidz_math_fini();
chksum_fini();
zil_fini();
dmu_fini();
zio_fini();
ddt_fini();
brt_fini();
metaslab_stat_fini();
zfs_btree_fini();
unique_fini();
zfs_refcount_fini();
fm_fini();
scan_fini();
qat_fini();
spa_import_progress_destroy();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
avl_destroy(&spa_l2cache_avl);
cv_destroy(&spa_namespace_cv);
mutex_destroy(&spa_namespace_lock);
mutex_destroy(&spa_spare_lock);
mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has a dedicated slog device. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)
{
return (spa->spa_log_class->mc_groups != 0);
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
!txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}
spa_mode_t
spa_mode(spa_t *spa)
{
return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
return (spa->spa_dedup_checksum);
}
/*
* Reset the pool's scan stats per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_scrub_pause = 0;
if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_errorscrub_pause = 0;
spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
spa->spa_scan_pass_issued = 0;
/* error scrub stats */
spa->spa_scan_pass_errorscrub_spent_paused = 0;
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
return (SET_ERROR(ENOENT));
memset(ps, 0, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_examined = scn->scn_phys.scn_examined;
ps->pss_skipped = scn->scn_phys.scn_skipped;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
/* data not stored on disk */
ps->pss_pass_exam = spa->spa_scan_pass_exam;
ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
ps->pss_pass_issued = spa->spa_scan_pass_issued;
ps->pss_issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
/* error scrub data stored on disk */
ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
ps->pss_error_scrub_to_be_examined =
scn->errorscrub_phys.dep_to_examine;
/* error scrub data not stored on disk */
ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;
return (0);
}
int
spa_maxblocksize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SPA_MAXBLOCKSIZE);
else
return (SPA_OLD_MAXBLOCKSIZE);
}
/*
* Returns the txg that the last device removal completed. No indirect mappings
* have been added since this txg.
*/
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
uint64_t vdevid;
uint64_t ret = -1ULL;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
* sr_prev_indirect_vdev is only modified while holding all the
* config locks, so it is sufficient to hold SCL_VDEV as reader when
* examining it.
*/
vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
while (vdevid != -1ULL) {
vdev_t *vd = vdev_lookup_top(spa, vdevid);
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
/*
* If the removal did not remap any data, we don't care.
*/
if (vdev_indirect_births_count(vib) != 0) {
ret = vdev_indirect_births_last_entry_txg(vib);
break;
}
vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
IMPLY(ret != -1ULL,
spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
return (ret);
}
int
spa_maxdnodesize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (DNODE_MAX_SIZE);
else
return (DNODE_MIN_SIZE);
}
boolean_t
spa_multihost(spa_t *spa)
{
return (spa->spa_multihost ? B_TRUE : B_FALSE);
}
uint32_t
spa_get_hostid(spa_t *spa)
{
return (spa->spa_hostid);
}
boolean_t
spa_trust_config(spa_t *spa)
{
return (spa->spa_trust_config);
}
uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
return (spa->spa_missing_tvds_allowed);
}
space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
return (spa->spa_syncing_log_sm);
}
void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
spa->spa_missing_tvds = missing;
}
/*
* Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
*/
const char *
spa_state_to_name(spa_t *spa)
{
ASSERT3P(spa, !=, NULL);
/*
* It is possible for the spa to exist without a root vdev while the
* spa transitions during import/export.
*/
vdev_t *rvd = spa->spa_root_vdev;
if (rvd == NULL) {
return ("TRANSITIONING");
}
vdev_state_t state = rvd->vdev_state;
vdev_aux_t aux = rvd->vdev_stat.vs_aux;
if (spa_suspended(spa) &&
(spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
return ("SUSPENDED");
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return ("OFFLINE");
case VDEV_STATE_REMOVED:
return ("REMOVED");
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return ("FAULTED");
else if (aux == VDEV_AUX_SPLIT_POOL)
return ("SPLIT");
else
return ("UNAVAIL");
case VDEV_STATE_FAULTED:
return ("FAULTED");
case VDEV_STATE_DEGRADED:
return ("DEGRADED");
case VDEV_STATE_HEALTHY:
return ("ONLINE");
default:
break;
}
return ("UNKNOWN");
}
boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
spa_has_checkpoint(spa_t *spa)
{
return (spa->spa_checkpoint_txg != 0);
}
boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
spa->spa_mode == SPA_MODE_READ);
}
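/*
* Return the lowest txg from which blocks may be claimed: the txg after
* the checkpoint when one exists (so checkpointed blocks are preserved),
* otherwise the pool's first txg.
*/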
uint64_t
spa_min_claim_txg(spa_t *spa)
{
uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
if (checkpoint_txg != 0)
return (checkpoint_txg + 1);
return (spa->spa_first_txg);
}
/*
* If there is a checkpoint, async destroys may consume more space from
* the pool instead of freeing it. In an attempt to save the pool from
* getting suspended when it is about to run out of space, we stop
* processing async destroys.
*/
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t unreserved = dsl_pool_unreserved_space(dp,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
if (spa_has_checkpoint(spa) && avail == 0)
return (B_TRUE);
return (B_FALSE);
}
#if defined(_KERNEL)
int
param_set_deadman_failmode_common(const char *val)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
strcmp(val, "panic"))
return (SET_ERROR(EINVAL));
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
}
return (0);
}
#endif
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);
/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);
/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);
/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);
/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);
/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
"Set additional debugging flags");
ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
"Set to attempt to recover from fatal errors");
ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
"Set to ignore IO errors during free and permanently leak the space");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
"Dead I/O check interval in milliseconds");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
"Enable deadman timer");
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
"SPA size estimate multiplication factor");
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
"Place DDT data into the special class");
ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
"Place user data indirect blocks into the special class");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
param_set_deadman_failmode, param_get_charp, ZMOD_RW,
"Failmode for deadman timer");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
"Pool sync expiration time in milliseconds");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
"IO expiration time in milliseconds");
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
"Small file blocks in special vdevs depends on this much "
"free space available");
/* END CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
param_get_uint, ZMOD_RW, "Reserved free space in pool");
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 30551feb6322..87c145593237 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,6376 +1,6412 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright (c) 2021, Klara Inc.
* Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
static uint_t zfs_embedded_slog_min_ms = 64;
/* default target for number of metaslabs per top-level vdev */
static uint_t zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
static uint_t zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
static uint_t zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
static uint_t zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
static unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
static unsigned int zfs_checksum_events_per_second = 20;
/*
* Ignore errors during scrub/resilver. Allows working around a resilver
* upon import when there are pool errors.
*/
static int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
/*
* Maximum and minimum ashift values that can be automatically set based on
* vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
* is higher than the maximum value, it is intentionally limited here to not
* excessively impact pool space efficiency. Higher ashift values may still
* be forced by vdev logical ashift or by user via ashift property, but won't
* be set automatically as a performance optimization.
*/
uint_t zfs_vdev_max_auto_ashift = 14;
uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
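/*
 * Editorial example: ashift is a power-of-two exponent, so with
 * ASHIFT_MIN == 9 the automatic range above spans 1 << 9 == 512 B up to
 * 1 << 14 == 16 KiB sectors.  A disk reporting 4 KiB physical sectors has
 * physical ashift 12, which falls inside this range and may be adopted
 * automatically; anything larger must be forced via the ashift property.
 */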
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static vdev_ops_t *const vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, *const *opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
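/*
 * Editorial example: vdev_getops("mirror") walks the table above and
 * returns &vdev_mirror_ops, while an unknown type string falls off the
 * end and returns NULL, which vdev_alloc() below maps to EINVAL.
 */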
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. A vdev may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
(void) vd, (void) remain_rs;
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
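/*
 * Editorial example: with a top-level ashift of 12 (4 KiB), a psize of
 * 5000 bytes rounds up to P2ROUNDUP(5000, 1ULL << 12) == 8192; the final
 * asize is the maximum of that and each child's asize for the same psize.
 */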
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
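/*
 * Editorial example: with vdev_ms_shift == 34 (16 GiB metaslabs, the
 * upper limit above), a 100 GiB asize aligns down to
 * P2ALIGN(100 GiB, 16 GiB) == 96 GiB, so a replacement device only needs
 * to supply 96 GiB of allocatable space.
 */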
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
static int
vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
uint64_t objid;
int err;
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (EINVAL);
}
err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
sizeof (uint64_t), 1, value);
if (err == ENOENT)
*value = vdev_prop_default_numeric(prop);
return (err);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
memcpy(newchild, pvd->vdev_child, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
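/*
 * Editorial example: vdev_guid_sum is a vdev's own guid plus the sums of
 * all its children, so attaching a leaf with guid G increases every
 * ancestor's vdev_guid_sum by exactly G in the walk above.
 * vdev_remove_child() performs the inverse walk, which is why vdev_free()
 * can assert vdev_guid_sum == vdev_guid once all children are gone.
 */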
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
/*
* Default Thresholds for tuning ZED
*/
vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
const char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
const char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
const char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
vd->vdev_path = spa_strdup(tmp);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
vd->vdev_devid = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
vd->vdev_physpath = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&tmp) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
vd->vdev_fru = spa_strdup(tmp);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
- * Get the alignment requirement.
+ * Get the alignment requirement. Ignore pool ashift for vdev
+ * attach case.
*/
- (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
+ if (alloctype != VDEV_ALLOC_ATTACH) {
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
+ &vd->vdev_ashift);
+ } else {
+ vd->vdev_attaching = B_TRUE;
+ }
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
if (vd->vdev_ops == &vdev_root_ops &&
(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
&vd->vdev_root_zap);
}
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
&vd->vdev_noalloc);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
const char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_autotrim_kick_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
- tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_noalloc = svd->vdev_noalloc;
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_noalloc = 0;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If the pool is not set to autoexpand, we also need to preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
+/*
+ * Choose GCD for spa_gcd_alloc.
+ */
+static uint64_t
+vdev_gcd(uint64_t a, uint64_t b)
+{
+ while (b != 0) {
+ uint64_t t = b;
+ b = a % b;
+ a = t;
+ }
+ return (a);
+}
+
+/*
+ * Set spa_min_alloc and spa_gcd_alloc.
+ */
+static void
+vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
+{
+ if (min_alloc < spa->spa_min_alloc)
+ spa->spa_min_alloc = min_alloc;
+ if (spa->spa_gcd_alloc == INT_MAX) {
+ spa->spa_gcd_alloc = min_alloc;
+ } else {
+ spa->spa_gcd_alloc = vdev_gcd(min_alloc,
+ spa->spa_gcd_alloc);
+ }
+}
+
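/*
 * Editorial example: with top-level vdevs whose minimum allocation sizes
 * are 8192 and 12288 bytes, the helpers above fold them together:
 * spa_min_alloc becomes 8192 while spa_gcd_alloc becomes
 * vdev_gcd(12288, 8192) == 4096.
 */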
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* metaslab_group_create was delayed until allocation bias was available
*/
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
* The spa ashift min/max only apply for the normal metaslab
* class. Class destination is late binding so ashift boundary
* setting had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
- if (min_alloc < spa->spa_min_alloc)
- spa->spa_min_alloc = min_alloc;
+ vdev_spa_set_alloc(spa, min_alloc);
}
}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
* group.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is marked as non-allocating then don't
* activate the metaslabs since we want to ensure that
* no allocations are performed on this device.
*/
if (vd->vdev_noalloc) {
/* track non-allocating vdev space */
spa->spa_nonallocating_dspace += spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
} else if (!expanding) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
* Even though we close the space map, we need to set its
* pointer to NULL. The reason is that vdev_metaslab_fini()
* may be called multiple times for certain operations
* (i.e. when destroying a pool) so we need to ensure that
* this clause never executes twice. This logic is similar
* to the one used for the vdev_ms clause below.
*/
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
- ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
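/*
 * Editorial usage note: a caller with no zio of its own probes
 * synchronously, as the leaf-open path below does with
 * zio_wait(vdev_probe(vd, NULL)); zio-initiated callers pass their zio,
 * get NULL back, and become parents of the single shared probe zio.
 */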
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
(void) vd;
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
* Compute the raidz-deflation ratio. Note, we hard-code
* in 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
* otherwise it would inconsistently account for existing bp's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
}
}
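/*
 * Editorial example: for a plain disk, vdev_psize_to_asize(vd, 1 << 17)
 * equals 1 << 17, giving (1 << 17) / (131072 >> 9) == 512, i.e. no
 * deflation.  If parity inflates a 128 KiB block to 160 KiB of asize,
 * the ratio is 131072 / (163840 >> 9) == 409, so only ~80% of raw space
 * is counted as usable.
 */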
/*
* Choose the best of two ashifts, preferring one between logical ashift
* (absolute minimum) and administrator defined maximum, otherwise take
* the biggest of the two.
*/
uint64_t
vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
{
if (a > logical && a <= zfs_vdev_max_auto_ashift) {
if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (a);
else
return (MAX(a, b));
} else if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (MAX(a, b));
return (b);
}
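/*
 * Editorial examples, assuming zfs_vdev_max_auto_ashift == 14 (default):
 *   vdev_best_ashift(9, 12, 16) == 12  (16 exceeds the admin maximum)
 *   vdev_best_ashift(9, 16, 12) == 12  (only b is within the range)
 *   vdev_best_ashift(9, 12, 13) == 13  (both in range, take the larger)
 *   vdev_best_ashift(9,  9, 16) == 16  (neither in range, take the larger)
 */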
/*
* Maximize performance by inflating the configured ashift for top level
* vdevs to be as close to the physical ashift as possible while maintaining
* administrator defined limits and ensuring it doesn't go below the
* logical ashift.
*/
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
if (vd->vdev_ashift < vd->vdev_physical_ashift &&
vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
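/*
 * Editorial example: a 512e disk (512 B logical, 4 KiB physical) opens
 * with vdev_ashift == 9.  Since 9 < 12 and 12 <= zfs_vdev_max_auto_ashift,
 * the clamp above computes MIN(MAX(14, 9), MAX(ASHIFT_MIN, 12)) ==
 * MIN(14, 12) == 12, so the top-level vdev is optimized to 4 KiB
 * allocations.
 */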
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/* Keep the device in removed state if unplugged */
if (error == ENOENT && vd->vdev_removed) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
VDEV_AUX_NONE);
return (error);
}
/*
* Physical volume size should never be larger than its max size, unless
* the disk has shrunk while we were reading it or the device is buggy
* or damaged: either way it's not safe for use, bail out of the open.
*/
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3}, during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
* If the vdev_ashift was not overridden at creation time,
* then set it the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
- if (vd->vdev_top == vd) {
+ if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE)
vdev_ashift_optimize(vd);
- }
+ vd->vdev_attaching = B_FALSE;
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Make sure the alignment required hasn't increased.
*/
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled;
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize, e.g. esize, are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
- if (min_alloc < spa->spa_min_alloc)
- spa->spa_min_alloc = min_alloc;
+ vdev_spa_set_alloc(spa, min_alloc);
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
* If the config lock is not held, do not check the txg. spa_sync could
* be updating the vdev's label before updating spa_last_synced_txg.
*/
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
char *old, *new;
if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
"from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
dvd->vdev_path, svd->vdev_path);
spa_strfree(dvd->vdev_path);
dvd->vdev_path = spa_strdup(svd->vdev_path);
}
} else if (svd->vdev_path != NULL) {
dvd->vdev_path = spa_strdup(svd->vdev_path);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
(u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
}
/*
* Our enclosure sysfs path may have changed between imports
*/
old = dvd->vdev_enc_sysfs_path;
new = svd->vdev_enc_sysfs_path;
if ((old != NULL && new == NULL) ||
(old == NULL && new != NULL) ||
((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
"changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
old, new);
if (dvd->vdev_enc_sysfs_path)
spa_strfree(dvd->vdev_enc_sysfs_path);
if (svd->vdev_enc_sysfs_path) {
dvd->vdev_enc_sysfs_path = spa_strdup(
svd->vdev_enc_sysfs_path);
} else {
dvd->vdev_enc_sysfs_path = NULL;
}
}
}
/*
* Recursively copy vdev paths from one vdev to another. Source and destination
* vdev trees must have the same geometry, otherwise an error is returned.
* Intended to copy paths from the userland config into the MOS config.
*/
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
* The idea here is that while a vdev can shift positions within
* a top vdev (when replacing, attaching mirror, etc.) it cannot
* step outside of it.
*/
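/*
 * For example (illustrative): during a 'zpool replace', the leaf is
 * re-parented under a temporary replacing vdev but stays within the
 * same top-level vdev, so the guid lookup in stvd below still finds
 * it.
 */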
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
* Recursively copy vdev paths from one root vdev to another. Source and
* destination vdev trees may differ in geometry. For each destination leaf
* vdev, search the source for a vdev with the same guid and top vdev id.
* Intended to copy paths from userland config into MOS config.
*/
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* If the vdev is present, we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
/*
* Recheck if resilver is still needed and cancel any
* scheduled resilver if resilver is unneeded.
*/
if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
mutex_exit(&spa->spa_async_lock);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in 'maxfaults' or more children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
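/*
 * Concrete example (illustrative): in a two-way mirror, minref is
 * vdev_children == 2, so a txg enters the parent's DTL_MISSING only
 * if it appears in both children's DTL_OUTAGE maps; for raidz2,
 * minref is nparity + 1 == 3, matching the "more than two children"
 * rule above. See the minref selection in vdev_dtl_reassess().
 */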
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem, but it can result in devices being tried
* that are known not to have the data. In that case, the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
(void) dva, (void) psize;
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error, all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]),
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
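/*
 * Walk-through with assumed numbers: suppose DTL_MISSING covers
 * txgs [90, 110) and scrub_txg == 100. The reftree then holds +1
 * over [90, 110), -1 over [0, 100), and +2 over whatever DTL_SCRUB
 * holds. A txg in [90, 100) nets to 0 and is excised, unless the
 * scrub failed to repair it (net 2); txgs in [100, 110) stay at 1
 * and remain in the regenerated DTL_MISSING.
 */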
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
/*
* Iterate over all the vdevs except spare, and post kobj events
*/
void
vdev_post_kobj_evt(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_kobj_evt_post &&
vd->vdev_kobj_flag == B_FALSE) {
vd->vdev_kobj_flag = B_TRUE;
vd->vdev_ops->vdev_op_kobj_evt_post(vd);
}
for (int c = 0; c < vd->vdev_children; c++)
vdev_post_kobj_evt(vd->vdev_child[c]);
}
/*
* Iterate over all the vdevs except spare, and clear kobj events
*/
void
vdev_clear_kobj_evt(vdev_t *vd)
{
vd->vdev_kobj_flag = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear_kobj_evt(vd->vdev_child[c]);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
/*
* If the dtl cannot be sync'd there is no need to open it.
*/
if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
return (0);
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
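/*
 * Illustrative consequence (assuming a two-way mirror): offlining one
 * side while the other still has DTL_MISSING entries would leave a
 * DTL_OUTAGE in the top-level vdev, so callers such as
 * vdev_offline_locked() refuse the operation with EBUSY.
 */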
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
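/*
 * Typical caller pattern (sketch, mirroring the use in vdev_load()
 * below): treat a zero *sm_obj as "no checkpoint":
 *
 *	uint64_t obj;
 *	error = vdev_checkpoint_sm_object(vd, &obj);
 *	if (error == 0 && obj != 0)
 *		... open the checkpoint space map ...
 */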
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
/*
* On spa_load path, grab the allocation bias from our zap
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
}
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
uint64_t failfast;
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
1, &failfast);
if (error == 0) {
vd->vdev_failfast = failfast & 1;
} else if (error == ENOENT) {
vd->vdev_failfast = vdev_prop_default_numeric(
VDEV_PROP_FAILFAST);
} else {
vdev_dbgmsg(vd,
"vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
uint64_t zapobj;
if (vd->vdev_top_zap != 0)
zapobj = vd->vdev_top_zap;
else
zapobj = vd->vdev_leaf_zap;
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
&vd->vdev_checksum_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
&vd->vdev_checksum_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
&vd->vdev_io_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
&vd->vdev_io_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
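/*
 * Illustrative example (assuming the standard raidz allocation
 * rules): on a 4-wide raidz1 with ashift=12, a 16 KiB psize becomes
 * 4 data sectors plus 2 parity sectors, so vdev_op_asize returns
 * 24 KiB; for a plain disk or mirror the mapping is the identity
 * apart from ashift rounding.
 */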
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If the user did a 'zpool offline -f' then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd->vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
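/*
 * Example (illustrative pool/device names): after
 * 'zpool offline -f tank da0' the label carries
 * ZPOOL_CONFIG_AUX_STATE == "external", so the fault survives
 * reboot and re-import until 'zpool clear tank da0' is issued.
 */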
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_remove_wanted(spa_t *spa, uint64_t guid)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
/*
* If the vdev is already removed, or is expanding (which can trigger
* repartition add/remove events), then don't do anything.
*/
if (vd->vdev_removed || vd->vdev_expanding)
return (spa_vdev_state_exit(spa, NULL, 0));
/*
* Confirm the vdev has been removed, otherwise don't do anything.
*/
if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_REMOVE);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here. This is triggered by l2arc_rebuild_vdev()
* asynchronously for the whole device or in l2arc_evict() as it evicts
* space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED)) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
/*
* Asynchronously detach spare vdev if resilver or
* rebuild is not required
*/
if (vd->vdev_unspare &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
!vdev_rebuild_active(tvd))
spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect or removed vdev.
*/
if (!vdev_is_concrete(vd) || vd->vdev_removed)
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to still post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
(void) cvd;
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
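* Illustrative: with ashift = 9, shift = 9 + 47 = 56, so any vdev
* with an asize below 2^56 bytes (64 PiB) remains addressable by
* single-word entries.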
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t];
vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
memcpy(vs, &vd->vdev_stat, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_pspace = vd->vdev_psize;
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
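* (P2ALIGN below rounds the raw asize headroom down to a whole
* number of metaslabs.)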
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
if (vd->vdev_physical_ashift <= ASHIFT_MAX)
vs->vs_physical_ashift = vd->vdev_physical_ashift;
else
vs->vs_physical_ashift = 0;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
vs->vs_noalloc = MAX(vd->vdev_noalloc,
tvd ? tvd->vdev_noalloc : 0);
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
/* Suppress ASAN false positive */
#ifdef __SANITIZE_ADDRESS__
vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
#else
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
#endif
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_IOCTL. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_IOCTL;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting, use the priority to categorize the I/O.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM,
* ZIO_PRIORITY_REBUILD.
*/
if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
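/*
 * io_delay covers the time spent at the disk and io_delta the
 * total zio lifetime, so (io_delta - io_delay) approximates the
 * time spent queued.
 */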
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O, so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
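/*
 * Illustrative (assuming the ratio set by vdev_set_deflate_ratio()):
 * a 1:1 psize-to-asize vdev gets a deflate ratio of 512, making the
 * expression below an identity; RAID-Z expansion yields a smaller
 * ratio and hence a smaller deflated figure.
 */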
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
(void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since the vdev_offline() code path may find the device already
* in an offline state, we can miss a state change event to
* OFFLINE. Check the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also leave it in the
* REMOVED state for our special test online cases, which only
* attempt to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
 * Notify ZED of any significant state-change on a leaf vdev.
 */
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
VERIFY3U(pvd->vdev_children, >, 1);
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
ASSERT3P(pvd->vdev_child, !=, NULL);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, const char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (vq->vq_active > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %u active IOs",
vd->vdev_path, vq->vq_active);
/*
* Look at the head of the active queue; if the oldest
* outstanding I/O has been pending for longer than
* spa_deadman_synctime, invoke the deadman logic.
*/
fio = list_head(&vq->vq_active_list);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
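* For example (illustrative): a mirror's translation is the identity,
* while raidz maps the logical range onto a smaller per-child range
* and may leave a remainder to be translated by a further call.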
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev, initialize the physical
* range to the logical range and set an empty remaining
* range then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
* does not live on this leaf vdev. Only call the provided
* function when the translated physical range is non-empty.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
static char *
vdev_name(vdev_t *vd, char *buf, int buflen)
{
if (vd->vdev_path == NULL) {
if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
strlcpy(buf, vd->vdev_spa->spa_name, buflen);
} else if (!vd->vdev_ops->vdev_op_leaf) {
snprintf(buf, buflen, "%s-%llu",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id);
}
} else {
strlcpy(buf, vd->vdev_path, buflen);
}
return (buf);
}
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
uint64_t intval, zprop_source_t src)
{
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
static void
vdev_props_set_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd;
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
+ uint64_t objid;
nvlist_t *nvprops;
vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
/* this vdev could get removed while waiting for this sync task */
if (vd == NULL)
return;
+ /*
+ * Set vdev property values in the vdev props mos object.
+ */
+ if (vd->vdev_root_zap != 0) {
+ objid = vd->vdev_root_zap;
+ } else if (vd->vdev_top_zap != 0) {
+ objid = vd->vdev_top_zap;
+ } else if (vd->vdev_leaf_zap != 0) {
+ objid = vd->vdev_leaf_zap;
+ } else {
+ panic("unexpected vdev type");
+ }
+
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
- uint64_t intval, objid = 0;
+ uint64_t intval;
const char *strval;
vdev_prop_t prop;
const char *propname = nvpair_name(elem);
zprop_type_t proptype;
- /*
- * Set vdev property values in the vdev props mos object.
- */
- if (vd->vdev_root_zap != 0) {
- objid = vd->vdev_root_zap;
- } else if (vd->vdev_top_zap != 0) {
- objid = vd->vdev_top_zap;
- } else if (vd->vdev_leaf_zap != 0) {
- objid = vd->vdev_leaf_zap;
- } else {
- /*
- * XXX: implement vdev_props_set_check()
- */
- panic("vdev not root/top/leaf");
- }
-
switch (prop = vdev_name_to_prop(propname)) {
case VDEV_PROP_USERPROP:
if (vdev_prop_user(propname)) {
strval = fnvpair_value_string(elem);
if (strlen(strval) == 0) {
/* remove the property if value == "" */
(void) zap_remove(mos, objid, propname,
tx);
} else {
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
}
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
}
break;
default:
/* normalize the property name */
propname = vdev_prop_to_name(prop);
proptype = vdev_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(vdev_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos, objid, propname,
sizeof (uint64_t), 1, &intval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%lld",
(u_longlong_t)vdev_guid,
nvpair_name(elem), (longlong_t)intval);
} else {
panic("invalid vdev property type %u",
nvpair_type(elem));
}
}
}
mutex_exit(&spa->spa_props_lock);
}
int
vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
int error = 0;
ASSERT(vd != NULL);
+ /* Check that vdev has a zap we can use */
+ if (vd->vdev_root_zap == 0 &&
+ vd->vdev_top_zap == 0 &&
+ vd->vdev_leaf_zap == 0)
+ return (SET_ERROR(EINVAL));
+
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
&nvprops) != 0)
return (SET_ERROR(EINVAL));
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
return (SET_ERROR(EINVAL));
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
const char *propname = nvpair_name(elem);
vdev_prop_t prop = vdev_name_to_prop(propname);
uint64_t intval = 0;
const char *strval = NULL;
if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
error = EINVAL;
goto end;
}
if (vdev_prop_readonly(prop)) {
error = EROFS;
goto end;
}
/* Special Processing */
switch (prop) {
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL) {
error = EROFS;
break;
}
if (nvpair_value_string(elem, &strval) != 0) {
error = EINVAL;
break;
}
/* New path must start with /dev/ */
if (strncmp(strval, "/dev/", 5)) {
error = EINVAL;
break;
}
error = spa_vdev_setpath(spa, vdev_guid, strval);
break;
case VDEV_PROP_ALLOCATING:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
if (intval != vd->vdev_noalloc)
break;
if (intval == 0)
error = spa_vdev_noalloc(spa, vdev_guid);
else
error = spa_vdev_alloc(spa, vdev_guid);
break;
case VDEV_PROP_FAILFAST:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_failfast = intval & 1;
break;
case VDEV_PROP_CHECKSUM_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_n = intval;
break;
case VDEV_PROP_CHECKSUM_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_t = intval;
break;
case VDEV_PROP_IO_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_n = intval;
break;
case VDEV_PROP_IO_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_t = intval;
break;
default:
/* Most processing is done in vdev_props_set_sync */
break;
}
end:
if (error != 0) {
intval = error;
vdev_prop_add_list(outnvl, propname, strval, intval, 0);
return (error);
}
}
return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
int
vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int err = 0;
uint64_t objid;
uint64_t vdev_guid;
nvpair_t *elem = NULL;
nvlist_t *nvprops = NULL;
uint64_t intval = 0;
char *strval = NULL;
const char *propname = NULL;
vdev_prop_t prop;
ASSERT(vd != NULL);
ASSERT(mos != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (SET_ERROR(EINVAL));
}
ASSERT(objid != 0);
mutex_enter(&spa->spa_props_lock);
if (nvprops != NULL) {
char namebuf[64] = { 0 };
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
intval = 0;
strval = NULL;
propname = nvpair_name(elem);
prop = vdev_name_to_prop(propname);
zprop_source_t src = ZPROP_SRC_DEFAULT;
uint64_t integer_size, num_integers;
switch (prop) {
/* Special Read-only Properties */
case VDEV_PROP_NAME:
strval = vdev_name(vd, namebuf,
sizeof (namebuf));
if (strval == NULL)
continue;
vdev_prop_add_list(outnvl, propname, strval, 0,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CAPACITY:
/* percent used */
intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
(vd->vdev_stat.vs_alloc * 100 /
vd->vdev_stat.vs_dspace);
vdev_prop_add_list(outnvl, propname, NULL,
intval, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_STATE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_state, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_GUID:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_guid, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_asize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PSIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_psize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASHIFT:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_ashift, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_SIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace -
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ALLOCATED:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_EXPANDSZ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRAGMENTATION:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_fragmentation,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARITY:
vdev_prop_add_list(outnvl, propname, NULL,
vdev_get_nparity(vd), ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_DEVID:
if (vd->vdev_devid == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_devid, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PHYS_PATH:
if (vd->vdev_physpath == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_physpath, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ENC_PATH:
if (vd->vdev_enc_sysfs_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRU:
if (vd->vdev_fru == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_fru, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARENT:
if (vd->vdev_parent != NULL) {
strval = vdev_name(vd->vdev_parent,
namebuf, sizeof (namebuf));
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
}
continue;
case VDEV_PROP_CHILDREN:
if (vd->vdev_children > 0)
strval = kmem_zalloc(ZAP_MAXVALUELEN,
KM_SLEEP);
for (uint64_t i = 0; i < vd->vdev_children;
i++) {
const char *vname;
vname = vdev_name(vd->vdev_child[i],
namebuf, sizeof (namebuf));
if (vname == NULL)
vname = "(unknown)";
if (strlen(strval) > 0)
strlcat(strval, ",",
ZAP_MAXVALUELEN);
strlcat(strval, vname, ZAP_MAXVALUELEN);
}
if (strval != NULL) {
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
kmem_free(strval, ZAP_MAXVALUELEN);
}
continue;
case VDEV_PROP_NUMCHILDREN:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_children, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_READ_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_read_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_WRITE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_write_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CHECKSUM_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_checksum_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_INITIALIZE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_initialize_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_REMOVING:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_removing, ZPROP_SRC_NONE);
continue;
/* Numeric Properties */
case VDEV_PROP_ALLOCATING:
/* Leaf vdevs cannot have this property */
if (vd->vdev_mg == NULL &&
vd->vdev_top != NULL) {
src = ZPROP_SRC_NONE;
intval = ZPROP_BOOLEAN_NA;
} else {
err = vdev_prop_get_int(vd, prop,
&intval);
if (err && err != ENOENT)
break;
if (intval ==
vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
}
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
case VDEV_PROP_FAILFAST:
src = ZPROP_SRC_LOCAL;
strval = NULL;
err = zap_lookup(mos, objid, nvpair_name(elem),
sizeof (uint64_t), 1, &intval);
if (err == ENOENT) {
intval = vdev_prop_default_numeric(
prop);
err = 0;
} else if (err) {
break;
}
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
case VDEV_PROP_IO_T:
err = vdev_prop_get_int(vd, prop, &intval);
if (err && err != ENOENT)
break;
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
/* Text Properties */
case VDEV_PROP_COMMENT:
/* Exists in the ZAP below */
/* FALLTHRU */
case VDEV_PROP_USERPROP:
/* User Properties */
src = ZPROP_SRC_LOCAL;
err = zap_length(mos, objid, nvpair_name(elem),
&integer_size, &num_integers);
if (err)
break;
switch (integer_size) {
case 8:
/* User properties cannot be integers */
err = EINVAL;
break;
case 1:
/* string property */
strval = kmem_alloc(num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid,
nvpair_name(elem), 1,
num_integers, strval);
if (err) {
kmem_free(strval,
num_integers);
break;
}
vdev_prop_add_list(outnvl, propname,
strval, 0, src);
kmem_free(strval, num_integers);
break;
}
break;
default:
err = ENOENT;
break;
}
if (err)
break;
}
} else {
/*
* Get all properties from the MOS vdev property object.
*/
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, objid);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
intval = 0;
strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
propname = za.za_name;
switch (za.za_integer_length) {
case 8:
/* We do not allow integer user properties */
/* This is likely an internal value */
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid, za.za_name, 1,
za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
vdev_prop_add_list(outnvl, propname, strval, 0,
src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
}
mutex_exit(&spa->spa_props_lock);
if (err && err != ENOENT) {
return (err);
}
return (0);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
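/*
 * Usage note (illustrative): the tunables below are exposed as module
 * parameters on Linux (e.g. under /sys/module/zfs/parameters/) and as
 * sysctls under the vfs.zfs tree on FreeBSD.
 */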
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
"Default lower limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
"Default upper limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below ZED threshold).");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index 89667585345d..acb725696674 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -1,1911 +1,1911 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2014, 2020 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>
/*
* An indirect vdev corresponds to a vdev that has been removed. Since
* we cannot rewrite block pointers of snapshots, etc., we keep a
* mapping from old location on the removed device to the new location
* on another device in the pool and use this mapping whenever we need
* to access the DVA. Unfortunately, this mapping did not respect
* logical block boundaries when it was first created, and so a DVA on
* this indirect vdev may be "split" into multiple sections that each
* map to a different location. As a consequence, not all DVAs can be
* translated to an equivalent new DVA. Instead we must provide a
* "vdev_remap" operation that executes a callback on each contiguous
* segment of the new location. This function is used in multiple ways:
*
* - I/Os to this vdev use the callback to determine where the
* data is now located, and issue child I/Os for each segment's new
* location.
*
* - frees and claims to this vdev use the callback to free or claim
* each mapped segment. (Note that we don't actually need to claim
* log blocks on indirect vdevs, because we don't allocate to
* removing vdevs. However, zdb uses zio_claim() for its leak
* detection.)
*/
/*
* "Big theory statement" for how we mark blocks obsolete.
*
* When a block on an indirect vdev is freed or remapped, a section of
* that vdev's mapping may no longer be referenced (aka "obsolete"). We
* keep track of how much of each mapping entry is obsolete. When
* an entry becomes completely obsolete, we can remove it, thus reducing
* the memory used by the mapping. The complete picture of obsolescence
* is given by the following data structures, described below:
* - the entry-specific obsolete count
* - the vdev-specific obsolete spacemap
* - the pool-specific obsolete bpobj
*
* == On disk data structures used ==
*
* We track the obsolete space for the pool using several objects. Each
* of these objects is created on demand and freed when no longer
* needed, and is assumed to be empty if it does not exist.
* SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
*
* - Each vic_mapping_object (associated with an indirect vdev) can
* have a vimp_counts_object. This is an array of uint32_t's
* with the same number of entries as the vic_mapping_object. When
* the mapping is condensed, entries from the vic_obsolete_sm_object
* (see below) are folded into the counts. Therefore, each
* obsolete_counts entry tells us the number of bytes in the
* corresponding mapping entry that were not referenced when the
* mapping was last condensed.
*
* - Each indirect or removing vdev can have a vic_obsolete_sm_object.
* This is a space map containing an alloc entry for every DVA that
* has been obsoleted since the last time this indirect vdev was
* condensed. We use this object in order to improve performance
* when marking a DVA as obsolete. Instead of modifying an arbitrary
* offset of the vimp_counts_object, we only need to append an entry
* to the end of this object. When a DVA becomes obsolete, it is
* added to the obsolete space map. This happens when the DVA is
* freed, remapped and not referenced by a snapshot, or the last
* snapshot referencing it is destroyed.
*
* - Each dataset can have a ds_remap_deadlist object. This is a
* deadlist object containing all blocks that were remapped in this
* dataset but referenced in a previous snapshot. Blocks can *only*
* appear on this list if they were remapped (dsl_dataset_block_remapped);
* blocks that were killed in a head dataset are put on the normal
* ds_deadlist and marked obsolete when they are freed.
*
* - The pool can have a dp_obsolete_bpobj. This is a list of blocks
* in the pool that need to be marked obsolete. When a snapshot is
* destroyed, we move some of the ds_remap_deadlist to the obsolete
* bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
* asynchronously process the obsolete bpobj, moving its entries to
* the specific vdevs' obsolete space maps.
*
* == Summary of how we mark blocks as obsolete ==
*
* - When freeing a block: if any DVA is on an indirect vdev, append to
* vic_obsolete_sm_object.
* - When remapping a block, add dva to ds_remap_deadlist (if prev snap
* references; otherwise append to vic_obsolete_sm_object).
* - When freeing a snapshot: move parts of ds_remap_deadlist to
* dp_obsolete_bpobj (same algorithm as ds_deadlist).
* - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
* individual vdev's vic_obsolete_sm_object.
*/
/*
* "Big theory statement" for how we condense indirect vdevs.
*
* Condensing an indirect vdev's mapping is the process of determining
* the precise counts of obsolete space for each mapping entry (by
* integrating the obsolete spacemap into the obsolete counts) and
* writing out a new mapping that contains only referenced entries.
*
* We condense a vdev when we expect the mapping to shrink (see
* vdev_indirect_should_condense()), but only perform one condense at a
* time to limit the memory usage. In addition, we use a separate
* open-context thread (spa_condense_indirect_thread) to incrementally
* create the new mapping object in a way that minimizes the impact on
* the rest of the system.
*
* == Generating a new mapping ==
*
* To generate a new mapping, we follow these steps:
*
* 1. Save the old obsolete space map and create a new mapping object
* (see spa_condense_indirect_start_sync()). This initializes the
* spa_condensing_indirect_phys with the "previous obsolete space map",
* which is now read only. Newly obsolete DVAs will be added to a
* new (initially empty) obsolete space map, and will not be
* considered as part of this condense operation.
*
* 2. Construct in memory the precise counts of obsolete space for each
* mapping entry, by incorporating the obsolete space map into the
* counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
*
* 3. Iterate through each mapping entry, writing to the new mapping any
* entries that are not completely obsolete (i.e. which don't have
* obsolete count == mapping length). (See
* spa_condense_indirect_generate_new_mapping().)
*
* 4. Destroy the old mapping object and switch over to the new one
* (spa_condense_indirect_complete_sync).
*
* == Restarting from failure ==
*
* To restart the condense when we import/open the pool, we must start
* at the 2nd step above: reconstruct the precise counts in memory,
* based on the space map + counts. Then in the 3rd step, we start
* iterating where we left off: at vimp_max_offset of the new mapping
* object.
*/
static int zfs_condense_indirect_vdevs_enable = B_TRUE;
/*
* Condense if at least this percent of the bytes in the mapping is
* obsolete. With the default of 25%, the amount of space mapped
* will be reduced to 1% of its original size after at most 16
* condenses. Higher values will condense less often (causing less
* i/o); lower values will reduce the mapping size more quickly.
*/
static uint_t zfs_condense_indirect_obsolete_pct = 25;
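/*
* To see where the "1% after at most 16 condenses" figure above comes
* from: each condense that just meets the threshold removes at least
* 25% of the mapped bytes, so after k condenses at most (1 - 0.25)^k of
* the original mapping remains, and 0.75^16 ~= 0.010, i.e. about 1%.
*/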
/*
* Condense if the obsolete space map takes up more than this amount of
* space on disk (logically). This limits the amount of disk space
* consumed by the obsolete space map; the default of 1GB is small enough
* that we typically don't mind "wasting" it.
*/
static uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
/*
* Don't bother condensing if the mapping uses less than this amount of
* memory. The default of 128KB is considered a "trivial" amount of
* memory and not worth reducing.
*/
static uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a condense (which might otherwise
* complete too quickly). If used to reduce the performance impact of
* condensing in production, a maximum value of 1 should be sufficient.
*/
static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
/*
* If an indirect split block contains more than this many possible unique
* combinations when being reconstructed, consider it too computationally
* expensive to check them all. Instead, try at most 100 randomly-selected
* combinations each time the block is accessed. This allows all segment
* copies to participate fairly in the reconstruction when all combinations
* cannot be checked and prevents repeated use of one bad copy.
*/
uint_t zfs_reconstruct_indirect_combinations_max = 4096;
/*
* Enable to simulate damaged segments and validate reconstruction. This
* is intentionally not exposed as a module parameter.
*/
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
/*
* The indirect_child_t represents the vdev that we will read from, when we
* need to read all copies of the data (e.g. for scrub or reconstruction).
* For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
* ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
* ic_vdev is a child of the mirror.
*/
typedef struct indirect_child {
abd_t *ic_data;
vdev_t *ic_vdev;
/*
* ic_duplicate is NULL when the ic_data contents are unique; when it
* is determined to be a duplicate, it references the primary child.
*/
struct indirect_child *ic_duplicate;
list_node_t ic_node; /* node on is_unique_child */
int ic_error; /* set when a child does not contain the data */
} indirect_child_t;
/*
* The indirect_split_t represents one mapped segment of an i/o to the
* indirect vdev. For non-split (contiguously-mapped) blocks, there will be
* only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
* For split blocks, there will be several of these.
*/
typedef struct indirect_split {
list_node_t is_node; /* link on iv_splits */
/*
* is_split_offset is the offset into the i/o.
* This is the sum of the previous splits' is_size's.
*/
uint64_t is_split_offset;
vdev_t *is_vdev; /* top-level vdev */
uint64_t is_target_offset; /* offset on is_vdev */
uint64_t is_size;
int is_children; /* number of entries in is_child[] */
int is_unique_children; /* number of entries in is_unique_child */
list_t is_unique_child;
/*
* is_good_child is the child that we are currently using to
* attempt reconstruction.
*/
indirect_child_t *is_good_child;
indirect_child_t is_child[];
} indirect_split_t;
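/*
* For example (with hypothetical numbers), a 24K i/o whose mapping
* breaks it into three segments would be described by three
* indirect_split_t's:
*
*	split | is_split_offset | is_size
*	======|=================|========
*	  A   |        0        |   8K
*	  B   |       8K        |   4K
*	  C   |      12K        |  12K
*
* Each is_split_offset is the running sum of the previous splits'
* is_size's, and the is_size's together sum to io_size (24K).
*/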
/*
* The indirect_vsd_t is associated with each i/o to the indirect vdev.
* It is the "Vdev-Specific Data" in the zio_t's io_vsd.
*/
typedef struct indirect_vsd {
boolean_t iv_split_block;
boolean_t iv_reconstruct;
uint64_t iv_unique_combinations;
uint64_t iv_attempts;
uint64_t iv_attempts_max;
list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
static void
vdev_indirect_map_free(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
indirect_split_t *is;
while ((is = list_remove_head(&iv->iv_splits)) != NULL) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data != NULL)
abd_free(ic->ic_data);
}
indirect_child_t *ic;
while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
;
list_destroy(&is->is_unique_child);
kmem_free(is,
offsetof(indirect_split_t, is_child[is->is_children]));
}
kmem_free(iv, sizeof (*iv));
}
static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
.vsd_free = vdev_indirect_map_free,
};
/*
* Mark the given offset and size as being obsolete.
*/
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(size > 0);
VERIFY(vdev_indirect_mapping_entry_for_offset(
vd->vdev_indirect_mapping, offset) != NULL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
mutex_enter(&vd->vdev_obsolete_lock);
range_tree_add(vd->vdev_obsolete_segments, offset, size);
mutex_exit(&vd->vdev_obsolete_lock);
vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
}
}
/*
* Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
* wrapper is provided because the DMU does not know about vdev_t's and
* cannot directly call vdev_indirect_mark_obsolete.
*/
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
ASSERT(dmu_tx_is_syncing(tx));
/* The DMU can only remap indirect vdevs. */
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vdev_indirect_mark_obsolete(vd, offset, size);
}
static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
objset_t *mos = spa->spa_meta_objset;
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&sci->sci_new_mapping_entries[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
sci->sci_new_mapping =
vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
return (sci);
}
static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
for (int i = 0; i < TXG_SIZE; i++)
list_destroy(&sci->sci_new_mapping_entries[i]);
if (sci->sci_new_mapping != NULL)
vdev_indirect_mapping_close(sci->sci_new_mapping);
kmem_free(sci, sizeof (*sci));
}
boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
if (!zfs_condense_indirect_vdevs_enable)
return (B_FALSE);
/*
* We can only condense one indirect vdev at a time.
*/
if (spa->spa_condensing_indirect != NULL)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
/*
* The mapping object size must not change while we are
* condensing, so we can only condense indirect vdevs
* (not vdevs that are still in the middle of being removed).
*/
if (vd->vdev_ops != &vdev_indirect_ops)
return (B_FALSE);
/*
* If nothing new has been marked obsolete, there is no
* point in condensing.
*/
uint64_t obsolete_sm_obj __maybe_unused;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
if (vd->vdev_obsolete_sm == NULL) {
ASSERT0(obsolete_sm_obj);
return (B_FALSE);
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
uint64_t mapping_size = vdev_indirect_mapping_size(vim);
uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
ASSERT3U(bytes_obsolete, <=, bytes_mapped);
/*
* If a high percentage of the bytes that are mapped have become
* obsolete, condense (unless the mapping is already small enough).
* This has a good chance of reducing the amount of memory used
* by the mapping.
*/
if (bytes_obsolete * 100 / bytes_mapped >=
zfs_condense_indirect_obsolete_pct &&
mapping_size > zfs_condense_min_mapping_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete "
"spacemap covers %d%% of %lluMB mapping",
(u_longlong_t)vd->vdev_id,
(int)(bytes_obsolete * 100 / bytes_mapped),
(u_longlong_t)bytes_mapped / 1024 / 1024);
return (B_TRUE);
}
/*
* If the obsolete space map takes up too much space on disk,
* condense in order to free up this disk space.
*/
if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete sm "
"length %lluMB >= max size %lluMB",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)obsolete_sm_size / 1024 / 1024,
(u_longlong_t)zfs_condense_max_obsolete_bytes /
1024 / 1024);
return (B_TRUE);
}
return (B_FALSE);
}
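/*
* A concrete (hypothetical) example of the first trigger above: with
* bytes_mapped = 4GB and bytes_obsolete = 1.2GB, the integer math gives
* 1.2GB * 100 / 4GB = 30, which meets the default
* zfs_condense_indirect_obsolete_pct of 25, so the vdev is condensed
* provided mapping_size also exceeds zfs_condense_min_mapping_bytes.
*/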
/*
* This sync task completes (finishes) a condense, deleting the old
* mapping and replacing it with the new one.
*/
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_meta_objset;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
uint64_t new_count =
vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
/*
* Reset vdev_indirect_mapping to refer to the new object.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = sci->sci_new_mapping;
rw_exit(&vd->vdev_indirect_rwlock);
sci->sci_new_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = scip->scip_next_mapping_object;
scip->scip_next_mapping_object = 0;
space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
scip->scip_prev_obsolete_sm_object = 0;
scip->scip_vdev = 0;
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, tx));
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
"new mapping object %llu has %llu entries "
"(was %llu entries)",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)vic->vic_mapping_object,
(u_longlong_t)new_count, (u_longlong_t)old_count);
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* This sync task appends entries to the new mapping object.
*/
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
&sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}
/*
* Open-context function to add one entry to the new mapping. The new
* entry will be remembered and written from syncing context.
*/
static void
spa_condense_indirect_commit_entry(spa_t *spa,
vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
/*
* If we are the first entry committed this txg, kick off the sync
* task to write to the MOS on our behalf.
*/
if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
dsl_sync_task_nowait(dmu_tx_pool(tx),
spa_condense_indirect_commit_sync, sci, tx);
}
vdev_indirect_mapping_entry_t *vime =
kmem_alloc(sizeof (*vime), KM_SLEEP);
vime->vime_mapping = *vimep;
vime->vime_obsolete_count = count;
list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
dmu_tx_commit(tx);
}
static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
spa_t *spa = vd->vdev_spa;
uint64_t mapi = start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_num_entries =
vdev_indirect_mapping_num_entries(old_mapping);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
zfs_dbgmsg("starting condense of vdev %llu from index %llu",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
while (mapi < old_num_entries) {
if (zthr_iscancelled(zthr)) {
zfs_dbgmsg("pausing condense of vdev %llu "
"at index %llu", (u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
break;
}
vdev_indirect_mapping_entry_phys_t *entry =
&old_mapping->vim_entries[mapi];
uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
ASSERT3U(obsolete_counts[mapi], <=, entry_size);
if (obsolete_counts[mapi] < entry_size) {
spa_condense_indirect_commit_entry(spa, entry,
obsolete_counts[mapi]);
/*
* This delay may be requested for testing, debugging,
* or performance reasons.
*/
hrtime_t now = gethrtime();
hrtime_t sleep_until = now + MSEC2NSEC(
zfs_condense_indirect_commit_entry_delay_ms);
zfs_sleep_until(sleep_until);
}
mapi++;
}
}
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
(void) zthr;
spa_t *spa = arg;
return (spa->spa_condensing_indirect != NULL);
}
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *vd;
ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint32_t *counts;
uint64_t start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
space_map_t *prev_obsolete_sm = NULL;
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
/*
* The list must start out empty in order for the
* _commit_sync() sync task to be properly registered
* on the first call to _commit_entry(); so it's wise
* to double check and ensure we actually are starting
* with empty lists.
*/
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
if (prev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
counts, prev_obsolete_sm);
}
space_map_close(prev_obsolete_sm);
/*
* Generate new mapping. Determine what index to continue from
* based on the max offset that we've already written in the
* new mapping.
*/
uint64_t max_offset =
vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
if (max_offset == 0) {
/* We haven't written anything to the new mapping yet. */
start_index = 0;
} else {
/*
* Pick up from where we left off. _entry_for_offset()
* returns a pointer into the vim_entries array. If
* max_offset is greater than any of the mappings
* contained in the table, NULL will be returned, which
* indicates we've exhausted our iteration of the
* old_mapping.
*/
vdev_indirect_mapping_entry_phys_t *entry =
vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
max_offset);
if (entry == NULL) {
/*
* We've already written the whole new mapping.
* This special value will cause us to skip the
* generate_new_mapping step and just do the sync
* task to complete the condense.
*/
start_index = UINT64_MAX;
} else {
start_index = entry - old_mapping->vim_entries;
ASSERT3U(start_index, <,
vdev_indirect_mapping_num_entries(old_mapping));
}
}
spa_condense_indirect_generate_new_mapping(vd, counts,
start_index, zthr);
vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
/*
* If the zthr has received a cancellation signal while running
* in generate_new_mapping() or at any point after that, then bail
* early. We don't want to complete the condense if the spa is
* shutting down.
*/
if (zthr_iscancelled(zthr))
return;
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
spa_condense_indirect_complete_sync, sci, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
/*
* Sync task to begin the condensing process.
*/
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
ASSERT0(scip->scip_next_mapping_object);
ASSERT0(scip->scip_prev_obsolete_sm_object);
ASSERT0(scip->scip_vdev);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
uint64_t obsolete_sm_obj;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
ASSERT3U(obsolete_sm_obj, !=, 0);
scip->scip_vdev = vd->vdev_id;
scip->scip_next_mapping_object =
vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
/*
* We don't need to allocate a new space map object, since
* vdev_indirect_sync_obsolete will allocate one when needed.
*/
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
"posm=%llu nm=%llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)scip->scip_prev_obsolete_sm_object,
(u_longlong_t)scip->scip_next_mapping_object);
zthr_wakeup(spa->spa_condense_zthr);
}
/*
* Sync to the given vdev's obsolete space map any segments that are no longer
* referenced as of the given txg.
*
* If the obsolete space map doesn't exist yet, create and open it.
*/
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
ASSERT3U(vic->vic_mapping_object, !=, 0);
ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object == 0) {
obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
zfs_vdev_standard_sm_blksz, tx);
ASSERT(vd->vdev_top_zap != 0);
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
ASSERT3U(obsolete_sm_object, !=, 0);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
spa->spa_meta_objset, obsolete_sm_object,
0, vd->vdev_asize, 0));
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_write(vd->vdev_obsolete_sm,
vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}
int
spa_condense_init(spa_t *spa)
{
int error = zap_lookup(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
&spa->spa_condensing_indirect_phys);
if (error == 0) {
if (spa_writeable(spa)) {
spa->spa_condensing_indirect =
spa_condensing_indirect_create(spa);
}
return (0);
} else if (error == ENOENT) {
return (0);
} else {
return (error);
}
}
void
spa_condense_fini(spa_t *spa)
{
if (spa->spa_condensing_indirect != NULL) {
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
}
}
void
spa_start_indirect_condensing_thread(spa_t *spa)
{
ASSERT3P(spa->spa_condense_zthr, ==, NULL);
spa->spa_condense_zthr = zthr_create("z_indirect_condense",
spa_condense_indirect_thread_check,
spa_condense_indirect_thread, spa, minclsyspri);
}
/*
* Gets the obsolete spacemap object from the vdev's ZAP. On success sm_obj
* will contain either the obsolete spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
/*
* Gets the "obsolete counts are precise" value from the vdev's ZAP.
* On success are_precise will be set to reflect if the counts are precise.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*are_precise = B_FALSE;
return (0);
}
uint64_t val = 0;
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
if (error == 0) {
*are_precise = (val != 0);
} else if (error == ENOENT) {
*are_precise = B_FALSE;
error = 0;
}
return (error);
}
static void
vdev_indirect_close(vdev_t *vd)
{
(void) vd;
}
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
*psize = *max_psize = vd->vdev_asize +
VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*logical_ashift = vd->vdev_ashift;
*physical_ashift = vd->vdev_physical_ashift;
return (0);
}
typedef struct remap_segment {
vdev_t *rs_vd;
uint64_t rs_offset;
uint64_t rs_asize;
uint64_t rs_split_offset;
list_node_t rs_node;
} remap_segment_t;
static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
rs->rs_vd = vd;
rs->rs_offset = offset;
rs->rs_asize = asize;
rs->rs_split_offset = split_offset;
return (rs);
}
/*
* Given an indirect vdev and an extent on that vdev, this function
* duplicates the physical entries of the indirect mapping that correspond
* to the extent into a new array and returns a pointer to it. In addition,
* copied_entries is populated with the number of mapping entries that were
* duplicated.
*
* Note that the function assumes that the caller holds vdev_indirect_rwlock.
* This ensures that the mapping won't change due to condensing as we
* copy over its contents.
*
* Finally, since we are doing an allocation, it is up to the caller to
* free the array allocated in this function.
*/
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
uint64_t asize, uint64_t *copied_entries)
{
vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t entries = 0;
ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
vdev_indirect_mapping_entry_phys_t *first_mapping =
vdev_indirect_mapping_entry_for_offset(vim, offset);
ASSERT3P(first_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *m = first_mapping;
while (asize > 0) {
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size = MIN(asize, size - inner_offset);
offset += inner_size;
asize -= inner_size;
entries++;
m++;
}
size_t copy_length = entries * sizeof (*first_mapping);
duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
memcpy(duplicate_mappings, first_mapping, copy_length);
*copied_entries = entries;
return (duplicate_mappings);
}
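/*
* To illustrate the walk above with hypothetical numbers: suppose the
* caller asks for offset 10K and asize 20K, and the mapping holds
* entries whose sources cover [0K, 16K) and [16K, 48K). The first
* iteration computes inner_offset = 10K - 0K = 10K and inner_size =
* MIN(20K, 16K - 10K) = 6K, leaving asize = 14K; the second computes
* inner_offset = 0 and inner_size = MIN(14K, 32K) = 14K, leaving
* asize = 0. Two entries are therefore duplicated.
*/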
/*
* Goes through the relevant indirect mappings until it hits a concrete vdev
* and issues the callback. On the way to the concrete vdev, if any other
* indirect vdevs are encountered, then the callback will also be called on
* each of those indirect vdevs. For example, if the segment is mapped to
* segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
* mapped to segment B on concrete vdev 2, then the callback will be called on
* both vdev 1 and vdev 2.
*
* While the callback passed to vdev_indirect_remap() is called on every vdev
* the function encounters, certain callbacks only care about concrete vdevs.
* These types of callbacks should return immediately and explicitly when they
* are called on an indirect vdev.
*
* Because there is a possibility that a DVA section in the indirect device
* has been split into multiple sections in our mapping, we keep track
* of the relevant contiguous segments of the new location (remap_segment_t)
* in a stack. This way we can call the callback for each of the new sections
* created by a single section of the indirect device. Note, though, that in
* this scenario the callbacks in each split block won't occur in order in
* terms of offset, so callers should not make any assumptions about that.
*
* For callbacks that don't handle split blocks and immediately return when
* they encounter them (as is the case for remap_blkptr_cb), the caller can
* assume that its callback will be applied from the first indirect vdev
* encountered to the last one and then the concrete vdev, in that order.
*/
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
list_t stack;
spa_t *spa = vd->vdev_spa;
list_create(&stack, sizeof (remap_segment_t),
offsetof(remap_segment_t, rs_node));
for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
rs != NULL; rs = list_remove_head(&stack)) {
vdev_t *v = rs->rs_vd;
uint64_t num_entries = 0;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
ASSERT(rs->rs_asize > 0);
/*
* Note: As this function can be called from open context
* (e.g. zio_read()), we need the following rwlock to
* prevent the mapping from being changed by condensing.
*
* So we grab the lock and we make a copy of the entries
* that are relevant to the extent that we are working on.
* Once that is done, we drop the lock and iterate over
* our copy of the mapping. Once we are done with the remap
* segment and we free it, we also free our copy
* of the indirect mapping entries that are relevant to it.
*
* This way we don't need to wait until the function is
* finished with a segment, to condense it. In addition, we
* don't need a recursive rwlock for the case that a call to
* vdev_indirect_remap() needs to call itself (through the
* codepath of its callback) for the same vdev in the middle
* of its execution.
*/
rw_enter(&v->vdev_indirect_rwlock, RW_READER);
ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *mapping =
vdev_indirect_mapping_duplicate_adjacent_entries(v,
rs->rs_offset, rs->rs_asize, &num_entries);
ASSERT3P(mapping, !=, NULL);
ASSERT3U(num_entries, >, 0);
rw_exit(&v->vdev_indirect_rwlock);
for (uint64_t i = 0; i < num_entries; i++) {
/*
* Note: the vdev_indirect_mapping can not change
* while we are running. It only changes while the
* removal is in progress, and then only from syncing
* context. While a removal is in progress, this
* function is only called for frees, which also only
* happen from syncing context.
*/
vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
ASSERT3P(m, !=, NULL);
ASSERT3U(rs->rs_asize, >, 0);
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
ASSERT3U(rs->rs_offset, >=,
DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(rs->rs_offset, <,
DVA_MAPPING_GET_SRC_OFFSET(m) + size);
ASSERT3U(dst_vdev, !=, v->vdev_id);
uint64_t inner_offset = rs->rs_offset -
DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size =
MIN(rs->rs_asize, size - inner_offset);
vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
ASSERT3P(dst_v, !=, NULL);
if (dst_v->vdev_ops == &vdev_indirect_ops) {
list_insert_head(&stack,
rs_alloc(dst_v, dst_offset + inner_offset,
inner_size, rs->rs_split_offset));
}
if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
/*
* Note: This clause exists solely for
* testing purposes. We use it to ensure that
* split blocks work and that the callbacks
* using them yield the same result if issued
* in reverse order.
*/
uint64_t inner_half = inner_size / 2;
func(rs->rs_split_offset + inner_half, dst_v,
dst_offset + inner_offset + inner_half,
inner_half, arg);
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_half, arg);
} else {
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_size, arg);
}
rs->rs_offset += inner_size;
rs->rs_asize -= inner_size;
rs->rs_split_offset += inner_size;
}
VERIFY0(rs->rs_asize);
kmem_free(mapping, num_entries * sizeof (*mapping));
kmem_free(rs, sizeof (remap_segment_t));
}
list_destroy(&stack);
}
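/*
* A hypothetical trace of the stack-based iteration above: an extent on
* indirect vdev 1 maps to a segment on indirect vdev 2, which in turn
* maps to a segment on concrete vdev 3. The pass over vdev 1's mapping
* calls func() with vdev 2's coordinates and, because vdev 2 is
* indirect, pushes a remap_segment_t for it; the pass over vdev 2's
* mapping calls func() with vdev 3's coordinates and pushes nothing,
* since vdev 3 is concrete. The stack then drains and the iteration
* ends.
*/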
static void
vdev_indirect_child_io_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
mutex_enter(&pio->io_lock);
pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
mutex_exit(&pio->io_lock);
abd_free(zio->io_abd);
}
/*
* This is a callback for vdev_indirect_remap() which allocates an
* indirect_split_t for each split segment and adds it to iv_splits.
*/
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
zio_t *zio = arg;
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3P(vd, !=, NULL);
if (vd->vdev_ops == &vdev_indirect_ops)
return;
int n = 1;
if (vd->vdev_ops == &vdev_mirror_ops)
n = vd->vdev_children;
indirect_split_t *is =
kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
is->is_children = n;
is->is_size = size;
is->is_split_offset = split_offset;
is->is_target_offset = offset;
is->is_vdev = vd;
list_create(&is->is_unique_child, sizeof (indirect_child_t),
offsetof(indirect_child_t, ic_node));
/*
* Note that we only consider multiple copies of the data for
* *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
* though they use the same ops as mirror, because there's only one
* "good" copy under the replacing/spare.
*/
if (vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < n; i++) {
is->is_child[i].ic_vdev = vd->vdev_child[i];
list_link_init(&is->is_child[i].ic_node);
}
} else {
is->is_child[0].ic_vdev = vd;
}
list_insert_tail(&iv->iv_splits, is);
}
static void
vdev_indirect_read_split_done(zio_t *zio)
{
indirect_child_t *ic = zio->io_private;
if (zio->io_error != 0) {
/*
* Clear ic_data to indicate that we do not have data for this
* child.
*/
abd_free(ic->ic_data);
ic->ic_data = NULL;
}
}
/*
* Issue reads for all copies (mirror children) of all splits.
*/
static void
vdev_indirect_read_all(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (!vdev_readable(ic->ic_vdev))
continue;
/*
* If a child is missing the data, set ic_error; this is
* used in vdev_indirect_repair(). We nevertheless perform
* the read, which provides the opportunity to reconstruct
* the split block if at all possible.
*/
if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
zio->io_txg, 1))
ic->ic_error = SET_ERROR(ESTALE);
ic->ic_data = abd_alloc_sametype(zio->io_abd,
is->is_size);
ic->ic_duplicate = NULL;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset, ic->ic_data,
is->is_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_read_split_done, ic));
}
}
iv->iv_reconstruct = B_TRUE;
}
static void
vdev_indirect_io_start(zio_t *zio)
{
spa_t *spa __maybe_unused = zio->io_spa;
indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
list_create(&iv->iv_splits,
sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
zio->io_vsd = iv;
zio->io_vsd_ops = &vdev_indirect_vsd_ops;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (zio->io_type != ZIO_TYPE_READ) {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
/*
* Note: this code can handle other kinds of writes,
* but we don't expect them.
*/
ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
}
vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
vdev_indirect_gather_splits, zio);
indirect_split_t *first = list_head(&iv->iv_splits);
ASSERT3P(first, !=, NULL);
if (first->is_size == zio->io_size) {
/*
* This is not a split block; we are pointing to the entire
* data, which will checksum the same as the original data.
* Pass the BP down so that the child i/o can verify the
* checksum, and try a different location if available
* (e.g. on a mirror).
*
* While this special case could be handled the same as the
* general (split block) case, doing it this way ensures
* that the vast majority of blocks on indirect vdevs
* (which are not split) are handled identically to blocks
* on non-indirect vdevs. This allows us to be less strict
* about performance in the general (but rare) case.
*/
ASSERT0(first->is_split_offset);
ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
first->is_vdev, first->is_target_offset,
abd_get_offset(zio->io_abd, 0),
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
} else {
iv->iv_split_block = B_TRUE;
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
/*
* Read all copies. Note that for simplicity,
* we don't bother consulting the DTL in the
* resilver case.
*/
vdev_indirect_read_all(zio);
} else {
/*
* If this is a read zio, we read one copy of each
* split segment, from the top-level vdev. Since
* we don't know the checksum of each split
* individually, the child zio can't ensure that
* we get the right data. E.g. if it's a mirror,
* it will just read from a random (healthy) leaf
* vdev. We have to verify the checksum in
* vdev_indirect_io_done().
*
* For write zios, the vdev code will ensure we write
* to all children.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
zio_nowait(zio_vdev_child_io(zio, NULL,
is->is_vdev, is->is_target_offset,
abd_get_offset_size(zio->io_abd,
is->is_split_offset, is->is_size),
is->is_size, zio->io_type,
zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
}
}
}
zio_execute(zio);
}
/*
* Report a checksum error for a child.
*/
static void
vdev_indirect_checksum_error(zio_t *zio,
indirect_split_t *is, indirect_child_t *ic)
{
vdev_t *vd = ic->ic_vdev;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
- zio_bad_cksum_t zbc = {{{ 0 }}};
+ zio_bad_cksum_t zbc = { 0 };
abd_t *bad_abd = ic->ic_data;
abd_t *good_abd = is->is_good_child->ic_data;
(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
}
/*
* Issue repair i/os for any incorrect copies. We do this by comparing
* each split segment's correct data (is_good_child's ic_data) with each
* other copy of the data. If they differ, then we overwrite the bad data
* with the good copy. The DTL is checked in vdev_indirect_read_all() and
* if a vdev is missing a copy of the data we set ic_error and the read is
* performed. This provides the opportunity to reconstruct the split block
* if at all possible. ic_error is checked here and if set it suppresses
* incrementing the checksum counter. Aside from this, DTLs are not checked,
* which simplifies this code and also issues the optimal number of writes
* (based on which copies actually read bad data, as opposed to which we
* think might be wrong). For the same reason, we always use
* ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
*/
static void
vdev_indirect_repair(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (!spa_writeable(zio->io_spa))
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
if (ic->ic_duplicate == is->is_good_child)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset,
is->is_good_child->ic_data, is->is_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
NULL, NULL));
/*
* If ic_error is set the current child does not have
* a copy of the data, so suppress incrementing the
* checksum counter.
*/
if (ic->ic_error == ESTALE)
continue;
vdev_indirect_checksum_error(zio, is, ic);
}
}
}
/*
* Report checksum errors on all children that we read from.
*/
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data == NULL)
continue;
vdev_t *vd = ic->ic_vdev;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
NULL, zio, is->is_target_offset, is->is_size,
NULL, NULL, NULL);
}
}
}
/*
* Copy data from all the splits into the main zio, then validate the
* checksum. If the checksum validates successfully, return success.
*/
static int
vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
{
zio_bad_cksum_t zbc;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
ASSERT3P(is->is_good_child->ic_data, !=, NULL);
ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
is->is_split_offset, 0, is->is_size);
}
return (zio_checksum_error(zio, &zbc));
}
/*
* There are relatively few possible combinations, making it feasible to
* deterministically check them all. We do this by setting the good_child
* to the next unique split version. If we reach the end of the list, we
* "carry over" to the next unique split version (like counting in base
* is_unique_children, but each digit can have a different base).
*/
static int
vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
{
boolean_t more = B_TRUE;
iv->iv_attempts = 0;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is))
is->is_good_child = list_head(&is->is_unique_child);
while (more == B_TRUE) {
iv->iv_attempts++;
more = B_FALSE;
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_good_child = list_next(&is->is_unique_child,
is->is_good_child);
if (is->is_good_child != NULL) {
more = B_TRUE;
break;
}
is->is_good_child = list_head(&is->is_unique_child);
}
}
ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
return (SET_ERROR(ECKSUM));
}
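/*
* The loop above is mixed-radix counting in disguise. A minimal
* standalone sketch of the same idea (digit[], base[], nsplits and
* try() are placeholder names), where base[i] plays the role of
* is_unique_children for split i:
*
*	boolean_t more = B_TRUE;
*	while (more) {
*		try(digit);
*		more = B_FALSE;
*		for (int i = 0; i < nsplits; i++) {
*			if (++digit[i] < base[i]) {
*				more = B_TRUE;
*				break;
*			}
*			digit[i] = 0;
*		}
*	}
*
* Every combination is visited exactly once, in base[0] * base[1] *
* ... * base[nsplits - 1] iterations, which is the
* iv_unique_combinations bound asserted above.
*/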
/*
* There are too many combinations to try all of them in a reasonable amount
* of time. So try a fixed number of random combinations from the unique
* split versions, after which we'll consider the block unrecoverable.
*/
static int
vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
{
iv->iv_attempts = 0;
while (iv->iv_attempts < iv->iv_attempts_max) {
iv->iv_attempts++;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic = list_head(&is->is_unique_child);
int children = is->is_unique_children;
for (int i = random_in_range(children); i > 0; i--)
ic = list_next(&is->is_unique_child, ic);
ASSERT3P(ic, !=, NULL);
is->is_good_child = ic;
}
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
}
return (SET_ERROR(ECKSUM));
}
/*
* This is a validation function for reconstruction. It randomly selects
* a good combination, if one can be found, and then it intentionally
* damages all other segment copies by zeroing them. This forces the
* reconstruction algorithm to locate the one remaining known good copy.
*/
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
int error;
/* Presume all the copies are unique for initial selection. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (ic->ic_data != NULL) {
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic);
}
}
if (list_is_empty(&is->is_unique_child)) {
error = SET_ERROR(EIO);
goto out;
}
}
/*
* Set each is_good_child to a randomly-selected child which
* is known to contain validated data.
*/
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error)
goto out;
/*
* Damage all but the known good copy by zeroing it. This
* results in at most two unique copies per indirect_split_t.
* Both may need to be checked in order to reconstruct the block.
* Set iv->iv_attempts_max such that all unique combinations will be
* enumerated, but limit the damage to at most 12 indirect splits.
*/
iv->iv_attempts_max = 1;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
}
iv->iv_attempts_max *= 2;
if (iv->iv_attempts_max >= (1ULL << 12)) {
iv->iv_attempts_max = UINT64_MAX;
break;
}
}
out:
/* Empty the unique children lists so they can be reconstructed. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic;
while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
;
is->is_unique_children = 0;
}
return (error);
}
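/*
* The iv_attempts_max doubling above is why the damage is bounded at 12
* splits: after zeroing, each damaged split has at most two unique
* versions (the good copy and the all-zero copy), so d damaged splits
* yield at most 2^d combinations. The loop stops zeroing further splits
* once 2^d reaches 1 << 12 = 4096, the default
* zfs_reconstruct_indirect_combinations_max, and sets iv_attempts_max
* to UINT64_MAX so that every remaining combination is enumerated.
*/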
/*
* This function is called when we have read all copies of the data and need
* to try to find a combination of copies that gives us the right checksum.
*
* If we pointed to any mirror vdevs, this effectively does the job of the
* mirror. The mirror vdev code can't do its own job because we don't know
* the checksum of each split segment individually.
*
* We have to try every unique combination of copies of split segments, until
* we find one that checksums correctly. Duplicate segment copies are first
* identified and later skipped during reconstruction. This optimization
* reduces the search space and ensures that of the remaining combinations
* at most one is correct.
*
* When the total number of combinations is small they can all be checked.
* For example, if we have 3 segments in the split, and each points to a
* 2-way mirror with unique copies, we will have the following pieces of data:
*
* | mirror child
* split | [0] [1]
* ======|=====================
* A | data_A_0 data_A_1
* B | data_B_0 data_B_1
* C | data_C_0 data_C_1
*
* We will try the following (mirror children)^(number of splits) (2^3=8)
* combinations, which is similar to bitwise-little-endian counting in
* binary. In general each "digit" corresponds to a split segment, and the
* base of each digit is is_unique_children, which can be different for
* each digit.
*
* "low bit" "high bit"
* v v
* data_A_0 data_B_0 data_C_0
* data_A_1 data_B_0 data_C_0
* data_A_0 data_B_1 data_C_0
* data_A_1 data_B_1 data_C_0
* data_A_0 data_B_0 data_C_1
* data_A_1 data_B_0 data_C_1
* data_A_0 data_B_1 data_C_1
* data_A_1 data_B_1 data_C_1
*
* Note that the split segments may be on the same or different top-level
* vdevs. In either case, we may need to try lots of combinations (see
* zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror
* has small silent errors on all of its children, we can still reconstruct
* the correct data, as long as those errors are at sufficiently-separated
* offsets (specifically, separated by the largest block size - default of
* 128KB, but up to 16MB).
*/
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
boolean_t known_good = B_FALSE;
int error;
iv->iv_unique_combinations = 1;
iv->iv_attempts_max = UINT64_MAX;
if (zfs_reconstruct_indirect_combinations_max > 0)
iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
/*
* If nonzero, one in every zfs_reconstruct_indirect_damage_fraction
* blocks will be damaged, in order to validate reconstruction when
* there are split segments with damaged copies. known_good will be
* B_TRUE when reconstruction is known to be possible.
*/
if (zfs_reconstruct_indirect_damage_fraction != 0 &&
random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
/*
* Determine the unique children for a split segment and add them
* to the is_unique_child list. By restricting reconstruction
* to these children, only unique combinations will be considered.
* This can vastly reduce the search space when there are a large
* number of indirect splits.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic_i = &is->is_child[i];
if (ic_i->ic_data == NULL ||
ic_i->ic_duplicate != NULL)
continue;
for (int j = i + 1; j < is->is_children; j++) {
indirect_child_t *ic_j = &is->is_child[j];
if (ic_j->ic_data == NULL ||
ic_j->ic_duplicate != NULL)
continue;
if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
ic_j->ic_duplicate = ic_i;
}
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic_i);
}
/* Reconstruction is impossible, no valid children */
EQUIV(list_is_empty(&is->is_unique_child),
is->is_unique_children == 0);
if (list_is_empty(&is->is_unique_child)) {
zio->io_error = EIO;
vdev_indirect_all_checksum_errors(zio);
zio_checksum_verified(zio);
return;
}
iv->iv_unique_combinations *= is->is_unique_children;
}
if (iv->iv_unique_combinations <= iv->iv_attempts_max)
error = vdev_indirect_splits_enumerate_all(iv, zio);
else
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error != 0) {
/* All attempted combinations failed. */
ASSERT3B(known_good, ==, B_FALSE);
zio->io_error = error;
vdev_indirect_all_checksum_errors(zio);
} else {
/*
* The checksum has been successfully validated. Issue
* repair I/Os to any copies of splits which don't match
* the validated version.
*/
ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
vdev_indirect_repair(zio);
zio_checksum_verified(zio);
}
}
static void
vdev_indirect_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (iv->iv_reconstruct) {
/*
* We have read all copies of the data (e.g. from mirrors),
* either because this was a scrub/resilver, or because the
* one-copy read didn't checksum correctly.
*/
vdev_indirect_reconstruct_io_done(zio);
return;
}
if (!iv->iv_split_block) {
/*
* This was not a split block, so we passed the BP down,
* and the checksum was handled by the (one) child zio.
*/
return;
}
zio_bad_cksum_t zbc;
int ret = zio_checksum_error(zio, &zbc);
if (ret == 0) {
zio_checksum_verified(zio);
return;
}
/*
* The checksum didn't match. Read all copies of all splits, and
* then we will try to reconstruct. The next time
* vdev_indirect_io_done() is called, iv_reconstruct will be set.
*/
vdev_indirect_read_all(zio);
zio_vdev_io_redone(zio);
}
vdev_ops_t vdev_indirect_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_indirect_open,
.vdev_op_close = vdev_indirect_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_indirect_io_start,
.vdev_op_io_done = vdev_indirect_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = vdev_indirect_remap,
.vdev_op_xlate = NULL,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_INDIRECT, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* leaf vdev */
};
EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
ZMOD_RW,
"Minimum obsolete percent of bytes in the mapping "
"to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, U64, ZMOD_RW,
"Don't bother condensing if the mapping uses less than this amount of "
"memory");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, U64,
ZMOD_RW,
"Minimum size obsolete spacemap to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
UINT, ZMOD_RW,
"Used by tests to ensure certain actions happen in the middle of a "
"condense. A maximum value of 1 should be sufficient.");
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
UINT, ZMOD_RW,
"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index 14b98a76b84f..3445fa9d35d5 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -1,2673 +1,2673 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev_draid.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_raidz_io_verify() */
#endif
/*
* Virtual device vector for RAID-Z.
*
* This vdev supports single, double, and triple parity. For single parity,
* we use a simple XOR of all the data columns. For double or triple parity,
* we use a special case of Reed-Solomon coding. This extends the
* technique described in "The mathematics of RAID-6" by H. Peter Anvin by
* drawing on the system described in "A Tutorial on Reed-Solomon Coding for
* Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
* former is also based. The latter is designed to provide higher performance
* for writes.
*
* Note that the Plank paper claimed to support arbitrary N+M, but was then
* amended six years later identifying a critical flaw that invalidates its
* claims. Nevertheless, the technique can be adapted to work for up to
* triple parity. For additional parity, the amendment "Note: Correction to
* the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
* is viable, but the additional complexity means that write performance will
* suffer.
*
* All of the methods above operate on a Galois field with 2^N elements,
* GF(2^N). In our case we choose N=8, i.e. GF(2^8), so that all elements
* can be expressed with a single byte. Briefly, the operations on the
* field are defined as follows:
*
* o addition (+) is represented by a bitwise XOR
* o subtraction (-) is therefore identical to addition: A + B = A - B
* o multiplication of A by 2 is defined by the following bitwise expression:
*
* (A * 2)_7 = A_6
* (A * 2)_6 = A_5
* (A * 2)_5 = A_4
* (A * 2)_4 = A_3 + A_7
* (A * 2)_3 = A_2 + A_7
* (A * 2)_2 = A_1 + A_7
* (A * 2)_1 = A_0
* (A * 2)_0 = A_7
*
* In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
* As an aside, this multiplication is derived from the error correcting
* primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
*
* Observe that any number in the field (except for 0) can be expressed as a
* power of 2 -- a generator for the field. We store a table of the powers of
* 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
* be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
* than field addition). The inverse of a field element A (A^-1) is therefore
* A ^ (255 - 1) = A^254.
*
* The up-to-three parity columns, P, Q, R over several data columns,
* D_0, ... D_n-1, can be expressed by field operations:
*
* P = D_0 + D_1 + ... + D_n-2 + D_n-1
* Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
* = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
* R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
* = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
*
* We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
* XOR operation, and 2 and 4 can be computed quickly and generate linearly-
* independent coefficients. (There are no additional coefficients that have
* this property which is why the uncorrected Plank method breaks down.)
*
* See the reconstruction code below for how P, Q and R can be used individually
* or in concert to recover missing data columns.
*/
#define VDEV_RAIDZ_P 0
#define VDEV_RAIDZ_Q 1
#define VDEV_RAIDZ_R 2
#define VDEV_RAIDZ_MUL_2(x) (((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define VDEV_RAIDZ_MUL_4(x) (VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
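/*
 * A general multiply can be sketched using the log/exp tables mentioned
 * above (a minimal illustration -- gf_mul() is not a helper in this file;
 * exponents are reduced mod 255, the order of the multiplicative group):
 *
 *	static inline uint8_t
 *	gf_mul(uint8_t a, uint8_t b)
 *	{
 *		int l;
 *
 *		if (a == 0 || b == 0)
 *			return (0);
 *		l = vdev_raidz_log2[a] + vdev_raidz_log2[b];
 *		if (l >= 255)
 *			l -= 255;
 *		return (vdev_raidz_pow2[l]);
 *	}
 */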
/*
* We provide a mechanism to perform the field multiplication operation on a
* 64-bit value all at once rather than a byte at a time. This works by
* creating a mask from the top bit in each byte and using that to
* conditionally apply the XOR of 0x1d.
*/
#define VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
(mask) = (x) & 0x8080808080808080ULL; \
(mask) = ((mask) << 1) - ((mask) >> 7); \
(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}
#define VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
VDEV_RAIDZ_64MUL_2((x), mask); \
VDEV_RAIDZ_64MUL_2((x), mask); \
}
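/*
 * A worked byte of the trick above: for x = 0x80, mask = 0x80 and
 * ((mask << 1) - (mask >> 7)) leaves 0xff in that byte, so the result is
 * ((0x80 << 1) & 0xfe) ^ 0x1d = 0x1d, which matches VDEV_RAIDZ_MUL_2(0x80).
 * A self-check sketch (illustrative only):
 *
 *	uint64_t in = 0x8001020380404180ULL, x = in, mask;
 *
 *	VDEV_RAIDZ_64MUL_2(x, mask);
 *	for (int i = 0; i < 8; i++) {
 *		uint8_t b = (in >> (8 * i)) & 0xff;
 *		ASSERT3U((x >> (8 * i)) & 0xff, ==,
 *		    VDEV_RAIDZ_MUL_2(b) & 0xff);
 *	}
 */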
static void
vdev_raidz_row_free(raidz_row_t *rr)
{
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size != 0)
abd_free(rc->rc_abd);
if (rc->rc_orig_data != NULL)
abd_free(rc->rc_orig_data);
}
if (rr->rr_abd_empty != NULL)
abd_free(rr->rr_abd_empty);
kmem_free(rr, offsetof(raidz_row_t, rr_col[rr->rr_scols]));
}
void
vdev_raidz_map_free(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++)
vdev_raidz_row_free(rm->rm_row[i]);
kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_map_free(rm);
}
const zio_vsd_ops_t vdev_raidz_vsd_ops = {
.vsd_free = vdev_raidz_map_free_vsd,
};
static void
vdev_raidz_map_alloc_write(zio_t *zio, raidz_map_t *rm, uint64_t ashift)
{
int c;
int nwrapped = 0;
uint64_t off = 0;
raidz_row_t *rr = rm->rm_row[0];
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(rm->rm_nrows, ==, 1);
/*
* Pad any parity columns with additional space to account for skip
* sectors.
*/
if (rm->rm_skipstart < rr->rr_firstdatacol) {
ASSERT0(rm->rm_skipstart);
nwrapped = rm->rm_nskip;
} else if (rr->rr_scols < (rm->rm_skipstart + rm->rm_nskip)) {
nwrapped =
(rm->rm_skipstart + rm->rm_nskip) % rr->rr_scols;
}
/*
* Optional single skip sectors (rc_size == 0) will be handled in
* vdev_raidz_io_start_write().
*/
int skipped = rr->rr_scols - rr->rr_cols;
/* Allocate buffers for the parity columns */
for (c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* Parity columns will pad out a linear ABD to account for
* the skip sector. A linear ABD is used here because
* parity calculations use the ABD buffer directly to calculate
* parity. This avoids doing a memcpy back to the ABD after the
* parity has been calculated. By issuing the parity column
* with the skip sector we can reduce contention on the child
* VDEV queue locks (vq_lock).
*/
if (c < nwrapped) {
rc->rc_abd = abd_alloc_linear(
rc->rc_size + (1ULL << ashift), B_FALSE);
abd_zero_off(rc->rc_abd, rc->rc_size, 1ULL << ashift);
skipped++;
} else {
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
}
for (off = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
abd_t *abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
/*
* Generate I/O for skip sectors to improve aggregation
* continuity. We will use gang ABDs to reduce contention
* on the child VDEV queue locks (vq_lock) by issuing
* a single I/O that contains the data and skip sector.
*
* It is important to make sure that rc_size is not updated
* even though we are adding a skip sector to the ABD. When
* calculating the parity in vdev_raidz_generate_parity_row()
* the rc_size is used to iterate through the ABDs. We cannot
* have zeroed-out skip sectors used for calculating
* parity for raidz, because those same sectors are not used
* during reconstruction.
*/
if (c >= rm->rm_skipstart && skipped < rm->rm_nskip) {
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd, B_TRUE);
abd_gang_add(rc->rc_abd,
abd_get_zeros(1ULL << ashift), B_TRUE);
skipped++;
} else {
rc->rc_abd = abd;
}
off += rc->rc_size;
}
ASSERT3U(off, ==, zio->io_size);
ASSERT3S(skipped, ==, rm->rm_nskip);
}
static void
vdev_raidz_map_alloc_read(zio_t *zio, raidz_map_t *rm)
{
int c;
raidz_row_t *rr = rm->rm_row[0];
ASSERT3U(rm->rm_nrows, ==, 1);
/* Allocate buffers for the parity columns */
for (c = 0; c < rr->rr_firstdatacol; c++)
rr->rr_col[c].rc_abd =
abd_alloc_linear(rr->rr_col[c].rc_size, B_FALSE);
for (uint64_t off = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
off += rc->rc_size;
}
}
/*
* Divides the IO evenly across all child vdevs; usually, dcols is
* the number of children in the target vdev.
*
* Avoid inlining the function to keep vdev_raidz_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
uint64_t nparity)
{
raidz_row_t *rr;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = zio->io_offset >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = zio->io_size >> ashift;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* The starting byte offset on each child vdev. */
uint64_t o = (b / dcols) << ashift;
uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
raidz_map_t *rm =
kmem_zalloc(offsetof(raidz_map_t, rm_row[1]), KM_SLEEP);
rm->rm_nrows = 1;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
q = s / (dcols - nparity);
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
r = s - q * (dcols - nparity);
/* The number of "big columns" - those which contain remainder data. */
bc = (r == 0 ? 0 : r + nparity);
/*
* The total number of data and parity sectors associated with
* this I/O.
*/
tot = s + nparity * (q + (r == 0 ? 0 : 1));
/*
* acols: The columns that will be accessed.
* scols: The columns that will be accessed or skipped.
*/
if (q == 0) {
/* Our I/O request doesn't span all child vdevs. */
acols = bc;
scols = MIN(dcols, roundup(bc, nparity + 1));
} else {
acols = dcols;
scols = dcols;
}
ASSERT3U(acols, <=, scols);
rr = kmem_alloc(offsetof(raidz_row_t, rr_col[scols]), KM_SLEEP);
rm->rm_row[0] = rr;
rr->rr_cols = acols;
rr->rr_scols = scols;
rr->rr_bigcols = bc;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
rr->rr_firstdatacol = nparity;
rr->rr_abd_empty = NULL;
rr->rr_nempty = 0;
#ifdef ZFS_DEBUG
rr->rr_offset = zio->io_offset;
rr->rr_size = zio->io_size;
#endif
asize = 0;
for (c = 0; c < scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
col = f + c;
coff = o;
if (col >= dcols) {
col -= dcols;
coff += 1ULL << ashift;
}
rc->rc_devidx = col;
rc->rc_offset = coff;
rc->rc_abd = NULL;
rc->rc_orig_data = NULL;
rc->rc_error = 0;
rc->rc_tried = 0;
rc->rc_skipped = 0;
rc->rc_force_repair = 0;
rc->rc_allow_repair = 1;
rc->rc_need_orig_restore = B_FALSE;
if (c >= acols)
rc->rc_size = 0;
else if (c < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rm->rm_nskip = roundup(tot, nparity + 1) - tot;
rm->rm_skipstart = bc;
/*
* If all data stored spans all columns, there's a danger that parity
* will always be on the same device and, since parity isn't read
* during normal operation, that device's I/O bandwidth won't be
* used effectively. We therefore switch the parity every 1MB.
*
* ... at least that was, ostensibly, the theory. As a practical
* matter, unless we juggle the parity between all devices evenly, we
* won't see any benefit. Further, occasional writes that aren't a
* multiple of the LCM of the number of children and the minimum
* stripe width are sufficient to avoid pessimal behavior.
* Unfortunately, this decision created an implicit on-disk format
* requirement that we need to support for all eternity, but only
* for single-parity RAID-Z.
*
* If we intend to skip a sector in the zeroth column for padding
* we must make sure to note this swap. We will never intend to
* skip the first column since at least one data and one parity
* column must appear in each row.
*/
ASSERT(rr->rr_cols >= 2);
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
if (rr->rr_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
devidx = rr->rr_col[0].rc_devidx;
o = rr->rr_col[0].rc_offset;
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
rr->rr_col[1].rc_devidx = devidx;
rr->rr_col[1].rc_offset = o;
if (rm->rm_skipstart == 0)
rm->rm_skipstart = 1;
}
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_map_alloc_write(zio, rm, ashift);
} else {
vdev_raidz_map_alloc_read(zio, rm);
}
/* init RAIDZ parity ops */
rm->rm_ops = vdev_raidz_math_get_ops();
return (rm);
}
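/*
 * A worked example of the geometry above, assuming a 5-wide raidz1 with
 * 512-byte sectors (dcols = 5, nparity = 1, ashift = 9): a 1KB zio has
 * s = 2, so q = 2 / 4 = 0, r = 2, bc = r + nparity = 3, and
 * tot = s + nparity = 3. Since q == 0 the I/O doesn't span all children:
 * acols = bc = 3 (one parity and two data columns of one sector each) and
 * scols = MIN(5, roundup(3, 2)) = 4. The allocation is padded with
 * rm_nskip = roundup(3, 2) - 3 = 1 skip sector starting at column
 * rm_skipstart = bc = 3; that column has rc_size = 0 and is issued as an
 * optional write by vdev_raidz_io_start_write().
 */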
struct pqr_struct {
uint64_t *p;
uint64_t *q;
uint64_t *r;
};
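/*
 * The iterators below implement the Horner-style evaluation shown in the
 * header comment: P is a plain XOR accumulation, while for Q (and R) each
 * step first multiplies the running parity by 2 (and 4) and then XORs in
 * the next data column, so the columns must be visited in order
 * D_0 .. D_n-1.
 */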
static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && !pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++)
*pqr->p ^= *src;
return (0);
}
static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
}
return (0);
}
static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
*pqr->r ^= *src;
}
return (0);
}
static void
vdev_raidz_generate_parity_p(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
} else {
struct pqr_struct pqr = { p, NULL, NULL };
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_p_func, &pqr);
}
}
}
static void
vdev_raidz_generate_parity_pq(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, NULL };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pq_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
}
}
}
}
static void
vdev_raidz_generate_parity_pqr(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t *r = abd_to_buf(rr->rr_col[VDEV_RAIDZ_R].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_R].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
(void) memcpy(r, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
r[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, r };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pqr_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
VDEV_RAIDZ_64MUL_4(r[i], mask);
}
}
}
}
/*
* Generate RAID parity in the first virtual columns according to the number of
* parity columns available.
*/
void
vdev_raidz_generate_parity_row(raidz_map_t *rm, raidz_row_t *rr)
{
ASSERT3U(rr->rr_cols, !=, 0);
/* Generate using the new math implementation */
if (vdev_raidz_math_generate(rm, rr) != RAIDZ_ORIGINAL_IMPL)
return;
switch (rr->rr_firstdatacol) {
case 1:
vdev_raidz_generate_parity_p(rr);
break;
case 2:
vdev_raidz_generate_parity_pq(rr);
break;
case 3:
vdev_raidz_generate_parity_pqr(rr);
break;
default:
cmn_err(CE_PANIC, "invalid RAID-Z configuration");
}
}
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_generate_parity_row(rm, rr);
}
}
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
(void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]);
for (int i = 0; i < cnt; i++) {
dst[i] ^= src[i];
}
return (0);
}
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
void *private)
{
(void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, src++) {
VDEV_RAIDZ_64MUL_2(*dst, mask);
*dst ^= *src;
}
return (0);
}
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
(void) private;
uint64_t *dst = buf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++) {
/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
VDEV_RAIDZ_64MUL_2(*dst, mask);
}
return (0);
}
struct reconst_q_struct {
uint64_t *q;
int exp;
};
static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
struct reconst_q_struct *rq = private;
uint64_t *dst = buf;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, rq->q++) {
int j;
uint8_t *b;
*dst ^= *rq->q;
for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
*b = vdev_raidz_exp2(*b, rq->exp);
}
}
return (0);
}
struct reconst_pq_struct {
uint8_t *p;
uint8_t *q;
uint8_t *pxy;
uint8_t *qxy;
int aexp;
int bexp;
};
static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
uint8_t *yd = ybuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
*yd = *rpq->p ^ *rpq->pxy ^ *xd;
}
return (0);
}
static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
/* same operation as vdev_raidz_reconst_pq_func() on xd */
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
}
return (0);
}
static void
vdev_raidz_reconstruct_p(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
abd_t *dst, *src;
ASSERT3U(ntgts, ==, 1);
ASSERT3U(x, >=, rr->rr_firstdatacol);
ASSERT3U(x, <, rr->rr_cols);
ASSERT3U(rr->rr_col[x].rc_size, <=, rr->rr_col[VDEV_RAIDZ_P].rc_size);
src = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
dst = rr->rr_col[x].rc_abd;
abd_copy_from_buf(dst, abd_to_buf(src), rr->rr_col[x].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
if (c == x)
continue;
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_p_func, NULL);
}
}
static void
vdev_raidz_reconstruct_q(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
int c, exp;
abd_t *dst, *src;
ASSERT(ntgts == 1);
ASSERT(rr->rr_col[x].rc_size <= rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = (c == x) ? 0 : MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
dst = rr->rr_col[x].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy(dst, src, size);
if (rr->rr_col[x].rc_size > size) {
abd_zero_off(dst, size,
rr->rr_col[x].rc_size - size);
}
} else {
ASSERT3U(size, <=, rr->rr_col[x].rc_size);
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_q_pre_func, NULL);
(void) abd_iterate_func(dst,
size, rr->rr_col[x].rc_size - size,
vdev_raidz_reconst_q_pre_tail_func, NULL);
}
}
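/*
 * dst now holds Q as it would have been computed with D_x = 0. XORing in
 * the true Q (the first step of the post function below) leaves
 * 2^(cols - 1 - x) * D_x; multiplying each byte by the inverse
 * coefficient, 2^(255 - (cols - 1 - x)), then recovers D_x, which is
 * what the exp value below expresses.
 */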
src = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
dst = rr->rr_col[x].rc_abd;
exp = 255 - (rr->rr_cols - 1 - x);
struct reconst_q_struct rq = { abd_to_buf(src), exp };
(void) abd_iterate_func(dst, 0, rr->rr_col[x].rc_size,
vdev_raidz_reconst_q_post_func, &rq);
}
static void
vdev_raidz_reconstruct_pq(raidz_row_t *rr, int *tgts, int ntgts)
{
uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
abd_t *pdata, *qdata;
uint64_t xsize, ysize;
int x = tgts[0];
int y = tgts[1];
abd_t *xd, *yd;
ASSERT(ntgts == 2);
ASSERT(x < y);
ASSERT(x >= rr->rr_firstdatacol);
ASSERT(y < rr->rr_cols);
ASSERT(rr->rr_col[x].rc_size >= rr->rr_col[y].rc_size);
/*
* Move the parity data aside -- we're going to compute parity as
* though columns x and y were full of zeros -- Pxy and Qxy. We want to
* reuse the parity generation mechanism without trashing the actual
* parity so we make those columns appear to be full of zeros by
* setting their lengths to zero.
*/
pdata = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
qdata = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
xsize = rr->rr_col[x].rc_size;
ysize = rr->rr_col[y].rc_size;
rr->rr_col[VDEV_RAIDZ_P].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
rr->rr_col[VDEV_RAIDZ_Q].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
rr->rr_col[x].rc_size = 0;
rr->rr_col[y].rc_size = 0;
vdev_raidz_generate_parity_pq(rr);
rr->rr_col[x].rc_size = xsize;
rr->rr_col[y].rc_size = ysize;
p = abd_to_buf(pdata);
q = abd_to_buf(qdata);
pxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
qxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
xd = rr->rr_col[x].rc_abd;
yd = rr->rr_col[y].rc_abd;
/*
* We now have:
* Pxy = P + D_x + D_y
* Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
*
* We can then solve for D_x:
* D_x = A * (P + Pxy) + B * (Q + Qxy)
* where
* A = 2^(x - y) * (2^(x - y) + 1)^-1
* B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
*
* With D_x in hand, we can easily solve for D_y:
* D_y = P + Pxy + D_x
*/
a = vdev_raidz_pow2[255 + x - y];
b = vdev_raidz_pow2[255 - (rr->rr_cols - 1 - x)];
tmp = 255 - vdev_raidz_log2[a ^ 1];
aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];
ASSERT3U(xsize, >=, ysize);
struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };
(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
vdev_raidz_reconst_pq_func, &rpq);
(void) abd_iterate_func(xd, ysize, xsize - ysize,
vdev_raidz_reconst_pq_tail_func, &rpq);
abd_free(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
abd_free(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
/*
* Restore the saved parity data.
*/
rr->rr_col[VDEV_RAIDZ_P].rc_abd = pdata;
rr->rr_col[VDEV_RAIDZ_Q].rc_abd = qdata;
}
/*
* In the general case of reconstruction, we must solve the system of linear
* equations defined by the coefficients used to generate parity as well as
* the contents of the data and parity disks. This can be expressed with
* vectors for the original data (D) and the actual data (d) and parity (p)
* and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
*
* __ __ __ __
* | | __ __ | p_0 |
* | V | | D_0 | | p_m-1 |
* | | x | : | = | d_0 |
* | I | | D_n-1 | | : |
* | | ~~ ~~ | d_n-1 |
* ~~ ~~ ~~ ~~
*
* I is simply a square identity matrix of size n, and V is a Vandermonde
* matrix defined by the coefficients we chose for the various parity columns
* (1, 2, 4). Note that these values were chosen for simplicity, speedy
* computation, and linear separability.
*
* __ __ __ __
* | 1 .. 1 1 1 | | p_0 |
* | 2^n-1 .. 4 2 1 | __ __ | : |
* | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 |
* | 1 .. 0 0 0 | | D_1 | | d_0 |
* | 0 .. 0 0 0 | x | D_2 | = | d_1 |
* | : : : : | | : | | d_2 |
* | 0 .. 1 0 0 | | D_n-1 | | : |
* | 0 .. 0 1 0 | ~~ ~~ | : |
* | 0 .. 0 0 1 | | d_n-1 |
* ~~ ~~ ~~ ~~
*
* Note that I, V, d, and p are known. To compute D, we must invert the
* matrix and use the known data and parity values to reconstruct the unknown
* data values. We begin by removing the rows in V|I and d|p that correspond
* to failed or missing columns; we then make V|I square (n x n) and d|p
* sized n by removing rows corresponding to unused parity from the bottom up
* to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
* using Gauss-Jordan elimination. In the example below we use m=3 parity
* columns, n=8 data columns, with errors in d_1, d_2, and p_1:
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks
* | 19 205 116 29 64 16 4 1 | / /
* | 1 0 0 0 0 0 0 0 | / /
* | 0 1 0 0 0 0 0 0 | <--' /
* (V|I) = | 0 0 1 0 0 0 0 0 | <---'
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 19 205 116 29 64 16 4 1 |
* | 1 0 0 0 0 0 0 0 |
* (V|I)' = | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
* have carefully chosen the seed values 1, 2, and 4 to ensure that this
* matrix is not singular.
* __ __
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 0 0 1 0 0 0 0 0 |
* | 167 100 5 41 159 169 217 208 |
* | 166 100 4 40 158 168 216 209 |
* (V|I)'^-1 = | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
* of the missing data.
*
* As is apparent from the example above, the only non-trivial rows in the
* inverse matrix correspond to the data disks that we're trying to
* reconstruct. Indeed, those are the only rows we need as the others would
* only be useful for reconstructing data known or assumed to be valid. For
* that reason, we only build the coefficients in the rows that correspond to
* targeted columns.
*/
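/*
 * As a naive sketch of that final multiply (which
 * vdev_raidz_matrix_reconstruct() below implements with log/exp tables and
 * per-column sizes), each byte of a missing column is the GF(2^8) dot
 * product of its inverse-matrix row with the surviving (d|p)' bytes.
 * gf_mul() is the illustrative multiply sketched near the MUL macros
 * above, and surviving[]/reconstructed[] are illustrative names:
 *
 *	for (size_t x = 0; x < nbytes; x++) {
 *		uint8_t v = 0;
 *
 *		for (int j = 0; j < n; j++)
 *			v ^= gf_mul(invrows[i][j], surviving[j][x]);
 *		reconstructed[i][x] = v;
 *	}
 */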
static void
vdev_raidz_matrix_init(raidz_row_t *rr, int n, int nmap, int *map,
uint8_t **rows)
{
int i, j;
int pow;
ASSERT(n == rr->rr_cols - rr->rr_firstdatacol);
/*
* Fill in the missing rows of interest.
*/
for (i = 0; i < nmap; i++) {
ASSERT3S(0, <=, map[i]);
ASSERT3S(map[i], <=, 2);
pow = map[i] * n;
if (pow > 255)
pow -= 255;
ASSERT(pow <= 255);
for (j = 0; j < n; j++) {
pow -= map[i];
if (pow < 0)
pow += 255;
rows[i][j] = vdev_raidz_pow2[pow];
}
}
}
static void
vdev_raidz_matrix_invert(raidz_row_t *rr, int n, int nmissing, int *missing,
uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
int i, j, ii, jj;
uint8_t log;
/*
* Assert that the first nmissing entries from the array of used
* columns correspond to parity columns and that subsequent entries
* correspond to data columns.
*/
for (i = 0; i < nmissing; i++) {
ASSERT3S(used[i], <, rr->rr_firstdatacol);
}
for (; i < n; i++) {
ASSERT3S(used[i], >=, rr->rr_firstdatacol);
}
/*
* First initialize the storage where we'll compute the inverse rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
invrows[i][j] = (i == j) ? 1 : 0;
}
}
/*
* Subtract all trivial rows from the rows of consequence.
*/
for (i = 0; i < nmissing; i++) {
for (j = nmissing; j < n; j++) {
ASSERT3U(used[j], >=, rr->rr_firstdatacol);
jj = used[j] - rr->rr_firstdatacol;
ASSERT3S(jj, <, n);
invrows[i][j] = rows[i][jj];
rows[i][jj] = 0;
}
}
/*
* For each of the rows of interest, we must normalize it and subtract
* a multiple of it from the other rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < missing[i]; j++) {
ASSERT0(rows[i][j]);
}
ASSERT3U(rows[i][missing[i]], !=, 0);
/*
* Compute the inverse of the first element and multiply each
* element in the row by that value.
*/
log = 255 - vdev_raidz_log2[rows[i][missing[i]]];
for (j = 0; j < n; j++) {
rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
}
for (ii = 0; ii < nmissing; ii++) {
if (i == ii)
continue;
ASSERT3U(rows[ii][missing[i]], !=, 0);
log = vdev_raidz_log2[rows[ii][missing[i]]];
for (j = 0; j < n; j++) {
rows[ii][j] ^=
vdev_raidz_exp2(rows[i][j], log);
invrows[ii][j] ^=
vdev_raidz_exp2(invrows[i][j], log);
}
}
}
/*
* Verify that the data that is left in the rows is properly part of
* an identity matrix.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
if (j == missing[i]) {
ASSERT3U(rows[i][j], ==, 1);
} else {
ASSERT0(rows[i][j]);
}
}
}
}
static void
vdev_raidz_matrix_reconstruct(raidz_row_t *rr, int n, int nmissing,
int *missing, uint8_t **invrows, const uint8_t *used)
{
int i, j, x, cc, c;
uint8_t *src;
uint64_t ccount;
uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
uint8_t log = 0;
uint8_t val;
int ll;
uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
psize = sizeof (invlog[0][0]) * n * nmissing;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing; i++) {
invlog[i] = pp;
pp += n;
}
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
ASSERT3U(invrows[i][j], !=, 0);
invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
}
}
for (i = 0; i < n; i++) {
c = used[i];
ASSERT3U(c, <, rr->rr_cols);
ccount = rr->rr_col[c].rc_size;
ASSERT(ccount >= rr->rr_col[missing[0]].rc_size || i > 0);
if (ccount == 0)
continue;
src = abd_to_buf(rr->rr_col[c].rc_abd);
for (j = 0; j < nmissing; j++) {
cc = missing[j] + rr->rr_firstdatacol;
ASSERT3U(cc, >=, rr->rr_firstdatacol);
ASSERT3U(cc, <, rr->rr_cols);
ASSERT3U(cc, !=, c);
dcount[j] = rr->rr_col[cc].rc_size;
if (dcount[j] != 0)
dst[j] = abd_to_buf(rr->rr_col[cc].rc_abd);
}
for (x = 0; x < ccount; x++, src++) {
if (*src != 0)
log = vdev_raidz_log2[*src];
for (cc = 0; cc < nmissing; cc++) {
if (x >= dcount[cc])
continue;
if (*src == 0) {
val = 0;
} else {
if ((ll = log + invlog[cc][i]) >= 255)
ll -= 255;
val = vdev_raidz_pow2[ll];
}
if (i == 0)
dst[cc][x] = val;
else
dst[cc][x] ^= val;
}
}
}
kmem_free(p, psize);
}
static void
vdev_raidz_reconstruct_general(raidz_row_t *rr, int *tgts, int ntgts)
{
int n, i, c, t, tt;
int nmissing_rows;
int missing_rows[VDEV_RAIDZ_MAXPARITY];
int parity_map[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
uint8_t *used;
abd_t **bufs = NULL;
/*
* Matrix reconstruction can't use scatter ABDs yet, so we allocate
* temporary linear ABDs if any non-linear ABDs are found.
*/
for (i = rr->rr_firstdatacol; i < rr->rr_cols; i++) {
if (!abd_is_linear(rr->rr_col[i].rc_abd)) {
bufs = kmem_alloc(rr->rr_cols * sizeof (abd_t *),
KM_PUSHPAGE);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
bufs[c] = col->rc_abd;
if (bufs[c] != NULL) {
col->rc_abd = abd_alloc_linear(
col->rc_size, B_TRUE);
abd_copy(col->rc_abd, bufs[c],
col->rc_size);
}
}
break;
}
}
n = rr->rr_cols - rr->rr_firstdatacol;
/*
* Figure out which data columns are missing.
*/
nmissing_rows = 0;
for (t = 0; t < ntgts; t++) {
if (tgts[t] >= rr->rr_firstdatacol) {
missing_rows[nmissing_rows++] =
tgts[t] - rr->rr_firstdatacol;
}
}
/*
* Figure out which parity columns to use to help generate the missing
* data columns.
*/
for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
ASSERT(tt < ntgts);
ASSERT(c < rr->rr_firstdatacol);
/*
* Skip any targeted parity columns.
*/
if (c == tgts[tt]) {
tt++;
continue;
}
parity_map[i] = c;
i++;
}
psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
nmissing_rows * n + sizeof (used[0]) * n;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing_rows; i++) {
rows[i] = pp;
pp += n;
invrows[i] = pp;
pp += n;
}
used = pp;
for (i = 0; i < nmissing_rows; i++) {
used[i] = parity_map[i];
}
for (tt = 0, c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
if (tt < nmissing_rows &&
c == missing_rows[tt] + rr->rr_firstdatacol) {
tt++;
continue;
}
ASSERT3S(i, <, n);
used[i] = c;
i++;
}
/*
* Initialize the interesting rows of the matrix.
*/
vdev_raidz_matrix_init(rr, n, nmissing_rows, parity_map, rows);
/*
* Invert the matrix.
*/
vdev_raidz_matrix_invert(rr, n, nmissing_rows, missing_rows, rows,
invrows, used);
/*
* Reconstruct the missing data using the generated matrix.
*/
vdev_raidz_matrix_reconstruct(rr, n, nmissing_rows, missing_rows,
invrows, used);
kmem_free(p, psize);
/*
* copy back from temporary linear abds and free them
*/
if (bufs) {
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
if (bufs[c] != NULL) {
abd_copy(bufs[c], col->rc_abd, col->rc_size);
abd_free(col->rc_abd);
}
col->rc_abd = bufs[c];
}
kmem_free(bufs, rr->rr_cols * sizeof (abd_t *));
}
}
static void
vdev_raidz_reconstruct_row(raidz_map_t *rm, raidz_row_t *rr,
const int *t, int nt)
{
int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
int ntgts;
int i, c, ret;
int nbadparity, nbaddata;
int parity_valid[VDEV_RAIDZ_MAXPARITY];
nbadparity = rr->rr_firstdatacol;
nbaddata = rr->rr_cols - nbadparity;
ntgts = 0;
for (i = 0, c = 0; c < rr->rr_cols; c++) {
if (c < rr->rr_firstdatacol)
parity_valid[c] = B_FALSE;
if (i < nt && c == t[i]) {
tgts[ntgts++] = c;
i++;
} else if (rr->rr_col[c].rc_error != 0) {
tgts[ntgts++] = c;
} else if (c >= rr->rr_firstdatacol) {
nbaddata--;
} else {
parity_valid[c] = B_TRUE;
nbadparity--;
}
}
ASSERT(ntgts >= nt);
ASSERT(nbaddata >= 0);
ASSERT(nbaddata + nbadparity == ntgts);
dt = &tgts[nbadparity];
/* Reconstruct using the new math implementation */
ret = vdev_raidz_math_reconstruct(rm, rr, parity_valid, dt, nbaddata);
if (ret != RAIDZ_ORIGINAL_IMPL)
return;
/*
* See if we can use any of our optimized reconstruction routines.
*/
switch (nbaddata) {
case 1:
if (parity_valid[VDEV_RAIDZ_P]) {
vdev_raidz_reconstruct_p(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_q(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
case 2:
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_P] &&
parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_pq(rr, dt, 2);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
}
vdev_raidz_reconstruct_general(rr, tgts, ntgts);
}
static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t nparity = vdrz->vd_nparity;
int c;
int lasterror = 0;
int numerrors = 0;
ASSERT(nparity > 0);
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
}
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0)
continue;
*physical_ashift = vdev_best_ashift(*logical_ashift,
*physical_ashift, cvd->vdev_physical_ashift);
}
*asize *= vd->vdev_children;
*max_asize *= vd->vdev_children;
if (numerrors > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_raidz_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t asize;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t cols = vdrz->vd_logical_width;
uint64_t nparity = vdrz->vd_nparity;
asize = ((psize - 1) >> ashift) + 1;
asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
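/*
 * For example, on a 6-wide raidz2 with ashift = 12, an 8K block starts at
 * asize = 2 data sectors, gains nparity * ((2 + 3) / 4) = 2 parity
 * sectors, and is rounded up to a multiple of nparity + 1 == 3:
 * roundup(4, 3) = 6 sectors, or 24K on disk (2 data + 2 parity + 2 skip
 * sectors of padding).
 */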
/*
* The allocatable space for a raidz vdev is N * sizeof(smallest child)
* so each child must provide at least 1/Nth of its asize.
*/
static uint64_t
vdev_raidz_min_asize(vdev_t *vd)
{
return ((vd->vdev_min_asize + vd->vdev_children - 1) /
vd->vdev_children);
}
void
vdev_raidz_child_done(zio_t *zio)
{
raidz_col_t *rc = zio->io_private;
ASSERT3P(rc->rc_abd, !=, NULL);
rc->rc_error = zio->io_error;
rc->rc_tried = 1;
rc->rc_skipped = 0;
}
static void
vdev_raidz_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
vdev_t *tvd = vd->vdev_top;
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_raidz_asize(vd, rr->rr_size);
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
/*
* It would be nice to assert that rs_end is equal
* to rc_offset + rc_size but there might be an
* optional I/O at the end that is not accounted in
* rc_size.
*/
if (physical_rs.rs_end > rc->rc_offset + rc->rc_size) {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset +
rc->rc_size + (1 << tvd->vdev_ashift));
} else {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset + rc->rc_size);
}
#endif
}
static void
vdev_raidz_io_start_write(zio_t *zio, raidz_row_t *rr, uint64_t ashift)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
/* Verify physical to logical translation */
vdev_raidz_io_verify(vd, rr, c);
if (rc->rc_size > 0) {
ASSERT3P(rc->rc_abd, !=, NULL);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd,
abd_get_size(rc->rc_abd), zio->io_type,
zio->io_priority, 0, vdev_raidz_child_done, rc));
} else {
/*
* Generate optional write for skip sector to improve
* aggregation contiguity.
*/
ASSERT3P(rc->rc_abd, ==, NULL);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, NULL, 1ULL << ashift,
zio->io_type, zio->io_priority,
ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL,
NULL));
}
}
}
static void
vdev_raidz_io_start_read(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
/*
* Iterate over the columns in reverse order so that we hit the parity
* last -- any errors along the way will force us to read the parity.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0)
continue;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_readable(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
}
if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
/*
* Start an IO operation on a RAIDZ VDev
*
* Outline:
* - For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity.
* 3. If the column skips any sectors for padding, create optional dummy
* write zio children for those areas to improve aggregation continuity.
* - For read operations:
* 1. Create child zio read operations to each data column's vdev to read
* the range of data required for zio.
* 2. If this is a scrub or resilver operation, or if any of the data
* vdevs have had errors, then create zio read operations to the parity
* columns' VDevs as well.
*/
static void
vdev_raidz_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_t *tvd = vd->vdev_top;
vdev_raidz_t *vdrz = vd->vdev_tsd;
raidz_map_t *rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift,
vdrz->vd_logical_width, vdrz->vd_nparity);
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
/*
* Until raidz expansion is implemented all maps for a raidz vdev
* contain a single row.
*/
ASSERT3U(rm->rm_nrows, ==, 1);
raidz_row_t *rr = rm->rm_row[0];
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_io_start_write(zio, rr, tvd->vdev_ashift);
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
vdev_raidz_io_start_read(zio, rr);
}
zio_execute(zio);
}
/*
* Report a checksum error for a child of a RAID-Z device.
*/
void
vdev_raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE) &&
zio->io_priority != ZIO_PRIORITY_REBUILD) {
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
&zio->io_bookmark, zio, rc->rc_offset, rc->rc_size,
rc->rc_abd, bad_data, &zbc);
}
}
/*
* We keep track of whether or not there were any injected errors, so that
* any ereports we generate can note it.
*/
static int
raidz_checksum_verify(zio_t *zio)
{
- zio_bad_cksum_t zbc = {{{0}}};
+ zio_bad_cksum_t zbc = {0};
raidz_map_t *rm = zio->io_vsd;
int ret = zio_checksum_error(zio, &zbc);
if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1;
return (ret);
}
/*
* Generate the parity from the data columns. If we tried and were able to
* read the parity without error, verify that the generated parity matches the
* data we read. If it doesn't, we fire off a checksum error. Return the
* number of such failures.
*/
static int
raidz_parity_verify(zio_t *zio, raidz_row_t *rr)
{
abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int c, ret = 0;
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc;
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
if (checksum == ZIO_CHECKSUM_NOPARITY)
return (ret);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
orig[c] = rc->rc_abd;
ASSERT3U(abd_get_size(rc->rc_abd), ==, rc->rc_size);
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
/*
* Verify any empty sectors are zero filled to ensure the parity
* is calculated correctly even if these non-data sectors are damaged.
*/
if (rr->rr_nempty && rr->rr_abd_empty != NULL)
ret += vdev_draid_map_verify_empty(zio, rr);
/*
* Regenerates parity even for !tried||rc_error!=0 columns. This
* isn't harmful but it does have the side effect of fixing stuff
* we didn't realize was necessary (i.e. even if we return 0).
*/
vdev_raidz_generate_parity_row(rm, rr);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
if (abd_cmp(orig[c], rc->rc_abd) != 0) {
vdev_raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
abd_free(orig[c]);
}
return (ret);
}
static int
vdev_raidz_worst_error(raidz_row_t *rr)
{
int error = 0;
for (int c = 0; c < rr->rr_cols; c++)
error = zio_worst_error(error, rr->rr_col[c].rc_error);
return (error);
}
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
if (!rc->rc_skipped)
unexpected_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
if (rc->rc_force_repair)
unexpected_errors++;
}
/*
* If we read more parity disks than were used for
* reconstruction, confirm that the other parity disks produced
* correct data.
*
* Note that we also regenerate parity when resilvering so we
* can write it out to failed devices later.
*/
if (parity_errors + parity_untried <
rr->rr_firstdatacol - data_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER)) {
int n = raidz_parity_verify(zio, rr);
unexpected_errors += n;
}
if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
(unexpected_errors > 0 || (zio->io_flags & ZIO_FLAG_RESILVER))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *vd = zio->io_vd;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!rc->rc_allow_repair) {
continue;
} else if (!rc->rc_force_repair &&
(rc->rc_error == 0 || rc->rc_size == 0)) {
continue;
}
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
ZIO_TYPE_WRITE,
zio->io_priority == ZIO_PRIORITY_REBUILD ?
ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
raidz_restore_orig_data(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
abd_copy(rc->rc_abd,
rc->rc_orig_data, rc->rc_size);
rc->rc_need_orig_restore = B_FALSE;
}
}
}
}
/*
* returns EINVAL if reconstruction of the block will not be possible
* returns ECKSUM if this specific reconstruction failed
* returns 0 on successful reconstruction
*/
static int
raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
{
raidz_map_t *rm = zio->io_vsd;
/* Reconstruct each row */
for (int r = 0; r < rm->rm_nrows; r++) {
raidz_row_t *rr = rm->rm_row[r];
int my_tgts[VDEV_RAIDZ_MAXPARITY]; /* value is child id */
int t = 0;
int dead = 0;
int dead_data = 0;
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
ASSERT0(rc->rc_need_orig_restore);
if (rc->rc_error != 0) {
dead++;
if (c >= nparity)
dead_data++;
continue;
}
if (rc->rc_size == 0)
continue;
for (int lt = 0; lt < ntgts; lt++) {
if (rc->rc_devidx == ltgts[lt]) {
if (rc->rc_orig_data == NULL) {
rc->rc_orig_data =
abd_alloc_linear(
rc->rc_size, B_TRUE);
abd_copy(rc->rc_orig_data,
rc->rc_abd, rc->rc_size);
}
rc->rc_need_orig_restore = B_TRUE;
dead++;
if (c >= nparity)
dead_data++;
my_tgts[t++] = c;
break;
}
}
}
if (dead > nparity) {
/* reconstruction not possible */
raidz_restore_orig_data(rm);
return (EINVAL);
}
if (dead_data > 0)
vdev_raidz_reconstruct_row(rm, rr, my_tgts, t);
}
/* Check for success */
if (raidz_checksum_verify(zio) == 0) {
/* Reconstruction succeeded - report errors */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
/*
* Note: if this is a parity column,
* we don't really know if it's wrong.
* We need to let
* vdev_raidz_io_done_verified() check
* it, and if we set rc_error, it will
* think that it is a "known" error
* that doesn't need to be checked
* or corrected.
*/
if (rc->rc_error == 0 &&
c >= rr->rr_firstdatacol) {
vdev_raidz_checksum_error(zio,
rc, rc->rc_orig_data);
rc->rc_error =
SET_ERROR(ECKSUM);
}
rc->rc_need_orig_restore = B_FALSE;
}
}
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
return (0);
}
/* Reconstruction failed - restore original data */
raidz_restore_orig_data(rm);
return (ECKSUM);
}
/*
* Iterate over all combinations of N bad vdevs and attempt a reconstruction.
* Note that the algorithm below is non-optimal because it doesn't take into
* account how reconstruction is actually performed. For example, with
* triple-parity RAID-Z the reconstruction procedure is the same if column 4
* is targeted as invalid as if columns 1 and 4 are targeted since in both
* cases we'd only use parity information in column 0.
*
* The order that we find the various possible combinations of failed
* disks is dictated by these rules:
* - Examine each "slot" (the "i" in tgts[i])
* - Try to increment this slot (tgts[i] = tgts[i] + 1)
* - if we can't increment because it runs into the next slot,
* reset our slot to the minimum, and examine the next slot
*
* For example, with a 6-wide RAIDZ3, and no known errors (so we have to choose
* 3 columns to reconstruct), we will generate the following sequence:
*
* STATE ACTION
* 0 1 2 special case: skip since these are all parity
* 0 1 3 first slot: reset to 0; middle slot: increment to 2
* 0 2 3 first slot: increment to 1
* 1 2 3 first: reset to 0; middle: reset to 1; last: increment to 4
* 0 1 4 first: reset to 0; middle: increment to 2
* 0 2 4 first: increment to 1
* 1 2 4 first: reset to 0; middle: increment to 3
* 0 3 4 first: increment to 1
* 1 3 4 first: increment to 2
* 2 3 4 first: reset to 0; middle: reset to 1; last: increment to 5
* 0 1 5 first: reset to 0; middle: increment to 2
* 0 2 5 first: increment to 1
* 1 2 5 first: reset to 0; middle: increment to 3
* 0 3 5 first: increment to 1
* 1 3 5 first: increment to 2
* 2 3 5 first: reset to 0; middle: increment to 4
* 0 4 5 first: increment to 1
* 1 4 5 first: increment to 2
* 2 4 5 first: increment to 3
* 3 4 5 done
*
* This strategy works for dRAID but is less efficient when there are a large
* number of child vdevs and therefore permutations to check. Furthermore,
* since the raidz_map_t rows likely do not overlap reconstruction would be
* possible as long as there are no more than nparity data errors per row.
* These additional permutations are not currently checked but could be as
* a future improvement.
*/
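/*
 * The slot-increment rule can be sketched in isolation as follows
 * (illustrative only; try_combination() is a stand-in for the
 * raidz_reconstruct() call that the real loop below drives):
 *
 *	int tgts[3] = { 0, 1, 2 };
 *
 *	while (tgts[2] < n) {
 *		try_combination(tgts);
 *		for (int t = 0; ; t++) {
 *			tgts[t]++;
 *			if (t == 2 || tgts[t] != tgts[t + 1])
 *				break;
 *			tgts[t] = (t == 0) ? 0 : tgts[t - 1] + 1;
 *		}
 *	}
 */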
static int
vdev_raidz_combrec(zio_t *zio)
{
int nparity = vdev_get_nparity(zio->io_vd);
raidz_map_t *rm = zio->io_vsd;
/* Check if there's enough data to attempt reconstruction. */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
int total_errors = 0;
for (int c = 0; c < rr->rr_cols; c++) {
if (rr->rr_col[c].rc_error)
total_errors++;
}
if (total_errors > nparity)
return (vdev_raidz_worst_error(rr));
}
for (int num_failures = 1; num_failures <= nparity; num_failures++) {
int tstore[VDEV_RAIDZ_MAXPARITY + 2];
int *ltgts = &tstore[1]; /* value is logical child ID */
/* Determine number of logical children, n */
int n = zio->io_vd->vdev_children;
ASSERT3U(num_failures, <=, nparity);
ASSERT3U(num_failures, <=, VDEV_RAIDZ_MAXPARITY);
/* Handle corner cases in combrec logic */
ltgts[-1] = -1;
for (int i = 0; i < num_failures; i++) {
ltgts[i] = i;
}
ltgts[num_failures] = n;
for (;;) {
int err = raidz_reconstruct(zio, ltgts, num_failures,
nparity);
if (err == EINVAL) {
/*
* Reconstruction not possible with this #
* failures; try more failures.
*/
break;
} else if (err == 0)
return (0);
/* Compute next targets to try */
for (int t = 0; ; t++) {
ASSERT3U(t, <, num_failures);
ltgts[t]++;
if (ltgts[t] == n) {
/* try more failures */
ASSERT3U(t, ==, num_failures - 1);
break;
}
ASSERT3U(ltgts[t], <, n);
ASSERT3U(ltgts[t], <=, ltgts[t + 1]);
/*
* If that spot is available, we're done here.
* Try the next combination.
*/
if (ltgts[t] != ltgts[t + 1])
break;
/*
* Otherwise, reset this tgt to the minimum,
* and move on to the next tgt.
*/
ltgts[t] = ltgts[t - 1] + 1;
ASSERT3U(ltgts[t], ==, t);
}
/* Increase the number of failures and keep trying. */
if (ltgts[num_failures - 1] == n)
break;
}
}
return (ECKSUM);
}
void
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
for (uint64_t row = 0; row < rm->rm_nrows; row++) {
raidz_row_t *rr = rm->rm_row[row];
vdev_raidz_reconstruct_row(rm, rr, t, nt);
}
}
/*
* Complete a write IO operation on a RAIDZ VDev
*
* Outline:
* 1. Check for errors on the child IOs.
* 2. Return, setting an error code if too few child VDevs were written
* to reconstruct the data later. Note that partial writes are
* considered successful if they can be reconstructed at all.
*/
static void
vdev_raidz_io_done_write_impl(zio_t *zio, raidz_row_t *rr)
{
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
total_errors++;
}
}
/*
* Treat partial writes as a success. If we couldn't write enough
* columns to reconstruct the data, the I/O failed. Otherwise,
* good enough.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
if (total_errors > rr->rr_firstdatacol) {
zio->io_error = zio_worst_error(zio->io_error,
vdev_raidz_worst_error(rr));
}
}
static void
vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
raidz_row_t *rr)
{
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* If scrubbing and a replacing/sparing child vdev determined
* that not all of its children have an identical copy of the
* data, then clear the error so the column is treated like
* any other read and force a repair to correct the damage.
*/
if (rc->rc_error == ECKSUM) {
ASSERT(zio->io_flags & ZIO_FLAG_SCRUB);
vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
rc->rc_force_repair = 1;
rc->rc_error = 0;
}
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
total_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
}
/*
* If there were data errors and the number of errors we saw was
* correctable -- less than or equal to the number of parity disks read
* -- reconstruct based on the missing data.
*/
if (data_errors != 0 &&
total_errors <= rr->rr_firstdatacol - parity_untried) {
/*
* We either attempt to read all the parity columns or
* none of them. If we didn't try to read parity, we
* wouldn't be here in the correctable case. There must
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
ASSERT(parity_untried == 0);
ASSERT(parity_errors < rr->rr_firstdatacol);
/*
* Identify the data columns that reported an error.
*/
int n = 0;
int tgts[VDEV_RAIDZ_MAXPARITY];
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error != 0) {
ASSERT(n < VDEV_RAIDZ_MAXPARITY);
tgts[n++] = c;
}
}
ASSERT(rr->rr_firstdatacol >= n);
vdev_raidz_reconstruct_row(rm, rr, tgts, n);
}
}
/*
* Return the number of reads issued.
*/
static int
vdev_raidz_read_all(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
int nread = 0;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
/*
* If this row contains empty sectors which are not required
* for a normal read then allocate an ABD for them now so they
* may be read, verified, and any needed repairs performed.
*/
if (rr->rr_nempty && rr->rr_abd_empty == NULL)
vdev_draid_map_alloc_empty(zio, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_tried || rc->rc_size == 0)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx],
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
nread++;
}
return (nread);
}
/*
* We're here because either there were too many errors to even attempt
* reconstruction (total_errors == rr_firstdatacol), or vdev_*_combrec()
* failed. In either case, there is enough bad data to prevent reconstruction.
* Start checksum ereports for all children which haven't failed.
*/
static void
vdev_raidz_io_done_unrecoverable(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx];
if (rc->rc_error != 0)
continue;
zio_bad_cksum_t zbc;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
mutex_enter(&cvd->vdev_stat_lock);
cvd->vdev_stat.vs_checksum_errors++;
mutex_exit(&cvd->vdev_stat_lock);
(void) zfs_ereport_start_checksum(zio->io_spa,
cvd, &zio->io_bookmark, zio, rc->rc_offset,
rc->rc_size, &zbc);
}
}
}
void
vdev_raidz_io_done(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_raidz_io_done_write_impl(zio, rm->rm_row[i]);
}
} else {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_reconstruct_known_missing(zio,
rm, rr);
}
if (raidz_checksum_verify(zio) == 0) {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
} else {
/*
* A sequential resilver has no checksum which makes
* combinatorial reconstruction impossible. This code
* path is unreachable since raidz_checksum_verify()
* has no checksum to verify and must succeed.
*/
ASSERT3U(zio->io_priority, !=, ZIO_PRIORITY_REBUILD);
/*
* This isn't a typical situation -- either we got a
* read error or a child silently returned bad data.
* Read every block so we can try again with as much
* data and parity as we can track down. If we've
* already been through once before, all children will
* be marked as tried so we'll proceed to combinatorial
* reconstruction.
*/
int nread = 0;
for (int i = 0; i < rm->rm_nrows; i++) {
nread += vdev_raidz_read_all(zio,
rm->rm_row[i]);
}
if (nread != 0) {
/*
* Normally our stage is VDEV_IO_DONE, but if
* we've already called redone(), it will have
* changed to VDEV_IO_START, in which case we
* don't want to call redone() again.
*/
if (zio->io_stage != ZIO_STAGE_VDEV_IO_START)
zio_vdev_io_redone(zio);
return;
}
zio->io_error = vdev_raidz_combrec(zio);
if (zio->io_error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
vdev_raidz_io_done_unrecoverable(zio);
}
}
}
}
static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
if (faulted > vdrz->vd_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered. The function
* assumes that at least one DTL is dirty which implies that full stripe
* width blocks must be resilvered.
*/
static boolean_t
vdev_raidz_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t dcols = vd->vdev_children;
uint64_t nparity = vdrz->vd_nparity;
uint64_t ashift = vd->vdev_top->vdev_ashift;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = DVA_GET_OFFSET(dva) >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = ((psize - 1) >> ashift) + 1;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* Unreachable by sequential resilver. */
ASSERT3U(phys_birth, !=, TXG_UNKNOWN);
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (s + nparity >= dcols)
return (B_TRUE);
for (uint64_t c = 0; c < s + nparity; c++) {
uint64_t devidx = (f + c) % dcols;
vdev_t *cvd = vd->vdev_child[devidx];
/*
* dsl_scan_need_resilver() already checked vd with
* vdev_dtl_contains(). So here just check cvd with
* vdev_dtl_empty(), cheaper and a good approximation.
*/
if (!vdev_dtl_empty(cvd, DTL_PARTIAL))
return (B_TRUE);
}
return (B_FALSE);
}
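/*
 * Editor's note: a hypothetical example of the geometry math above,
 * assuming a 6-wide raidz2 (dcols == 6, nparity == 2) with ashift == 12
 * (4 KiB sectors):
 *
 *	psize = 16384;			(a 16 KiB block)
 *	s = ((16384 - 1) >> 12) + 1;	(4 data sectors)
 *
 * Here s + nparity == 6 >= dcols, so the block spans a full stripe width
 * and B_TRUE is returned without walking the children. For an 8 KiB
 * block (s == 2), only the s + nparity == 4 children starting at column
 * f == b % dcols are checked for a dirty DTL.
 */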
static void
vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
(void) remain_rs;
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_raidz_ops);
uint64_t width = raidvd->vdev_children;
uint64_t tgt_col = cvd->vdev_id;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t b_start = logical_rs->rs_start >> ashift;
uint64_t b_end = logical_rs->rs_end >> ashift;
uint64_t start_row = 0;
if (b_start > tgt_col) /* avoid underflow */
start_row = ((b_start - tgt_col - 1) / width) + 1;
uint64_t end_row = 0;
if (b_end > tgt_col)
end_row = ((b_end - tgt_col - 1) / width) + 1;
physical_rs->rs_start = start_row << ashift;
physical_rs->rs_end = end_row << ashift;
ASSERT3U(physical_rs->rs_start, <=, logical_rs->rs_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_rs->rs_end - logical_rs->rs_start);
}
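/*
 * Editor's note: an illustrative example of the translation above,
 * assuming width == 5, tgt_col == 2, ashift == 12, and a logical range
 * beginning at sector b_start == 13:
 *
 *	start_row = ((13 - 2 - 1) / 5) + 1;	(== 3)
 *
 * Row 3 is the first row whose sector on child column 2 (parent sector
 * 3 * 5 + 2 == 17) lies at or beyond sector 13, so the child's physical
 * range starts at 3 << 12. The asserts confirm the physical range can
 * never be larger than the logical range it was derived from.
 */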
/*
* Initialize private RAIDZ specific fields from the nvlist.
*/
static int
vdev_raidz_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
vdev_raidz_t *vdrz;
uint64_t nparity;
uint_t children;
nvlist_t **child;
int error = nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children);
if (error != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* devices.
*/
if (nparity > 1 && spa_version(spa) < SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
else if (nparity > 2 && spa_version(spa) < SPA_VERSION_RAIDZ3)
return (SET_ERROR(EINVAL));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
nparity = 1;
}
vdrz = kmem_zalloc(sizeof (*vdrz), KM_SLEEP);
vdrz->vd_logical_width = children;
vdrz->vd_nparity = nparity;
*tsd = vdrz;
return (0);
}
static void
vdev_raidz_fini(vdev_t *vd)
{
kmem_free(vd->vdev_tsd, sizeof (vdev_raidz_t));
}
/*
* Add RAIDZ specific fields to the config nvlist.
*/
static void
vdev_raidz_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_raidz_ops);
vdev_raidz_t *vdrz = vd->vdev_tsd;
/*
* Make sure someone hasn't managed to sneak a fancy new vdev
* into a crufty old storage pool.
*/
ASSERT(vdrz->vd_nparity == 1 ||
(vdrz->vd_nparity <= 2 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ2) ||
(vdrz->vd_nparity <= 3 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ3));
/*
* Note that we'll add this even on storage pools where it
* isn't strictly required -- older software will just ignore
* it.
*/
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_nparity(vdev_t *vd)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
return (vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_ndisks(vdev_t *vd)
{
return (vd->vdev_children);
}
vdev_ops_t vdev_raidz_ops = {
.vdev_op_init = vdev_raidz_init,
.vdev_op_fini = vdev_raidz_fini,
.vdev_op_open = vdev_raidz_open,
.vdev_op_close = vdev_raidz_close,
.vdev_op_asize = vdev_raidz_asize,
.vdev_op_min_asize = vdev_raidz_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_raidz_io_start,
.vdev_op_io_done = vdev_raidz_io_done,
.vdev_op_state_change = vdev_raidz_state_change,
.vdev_op_need_resilver = vdev_raidz_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_raidz_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_raidz_config_generate,
.vdev_op_nparity = vdev_raidz_nparity,
.vdev_op_ndisks = vdev_raidz_ndisks,
.vdev_op_type = VDEV_TYPE_RAIDZ, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fm.c b/sys/contrib/openzfs/module/zfs/zfs_fm.c
index c42ef048dd74..c4eb74e873db 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fm.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fm.c
@@ -1,1597 +1,1572 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012,2021 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
* This general routine is responsible for generating all the different ZFS
* ereports. The payload is dependent on the class, and which arguments are
* supplied to the function:
*
* EREPORT POOL VDEV IO
* block X X X
* data X X
* device X X
* pool X
*
* If we are in a loading state, all errors are chained together by the same
* SPA-wide ENA (Error Numeric Association).
*
* For isolated I/O requests, we get the ENA from the zio_t. The propagation
* gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want
* to chain together all ereports associated with a logical piece of data. For
* read I/Os, there are basically three 'types' of I/O, which form a roughly
* layered diagram:
*
* +---------------+
* | Aggregate I/O | No associated logical data or device
* +---------------+
* |
* V
* +---------------+ Reads associated with a piece of logical data.
* | Read I/O | This includes reads on behalf of RAID-Z,
* +---------------+ mirrors, gang blocks, retries, etc.
* |
* V
* +---------------+ Reads associated with a particular device, but
* | Physical I/O | no logical data. Issued as part of vdev caching
* +---------------+ and I/O aggregation.
*
* Note that 'physical I/O' here is not the same terminology as used in the rest
* of ZIO. Typically, 'physical I/O' simply means that there is no attached
* blockpointer. But I/O with no associated block pointer can still be related
* to a logical piece of data (i.e. RAID-Z requests).
*
* Purely physical I/Os always have unique ENAs. They are not related to a
* particular piece of logical data, and therefore cannot be chained together.
* We still generate an ereport, but the DE doesn't correlate it with any
* logical piece of data. When such an I/O fails, the delegated I/O requests
* will issue a retry, which will trigger the 'real' ereport with the correct
* ENA.
*
* We keep track of the ENA for a ZIO chain through the 'io_logical' member.
* When a new logical I/O is issued, we set this to point to itself. Child I/Os
* then inherit this pointer, so that when it is first set subsequent failures
* will use the same ENA. For vdev cache fill and queue aggregation I/O,
* this pointer is set to NULL, and no ereport will be generated (since it
* doesn't actually correspond to any particular device or piece of data,
* and the caller will always retry without caching or queueing anyway).
*
* For checksum errors, we want to include more information about the actual
* error which occurs. Accordingly, we build an ereport when the error is
* noticed, but instead of sending it in immediately, we hang it off of the
* io_cksum_report field of the logical IO. When the logical IO completes
* (successfully or not), zfs_ereport_finish_checksum() is called with the
* good and bad versions of the buffer (if available), and we annotate the
* ereport with information about the differences.
*/
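/*
 * Editor's note: the ENA selection described above reduces to the
 * following precedence, sketched here for reference; the actual logic
 * (including lazy generation of the first two ENAs) lives in
 * zfs_ereport_start() below:
 *
 *	if (spa_load_state(spa) != SPA_LOAD_NONE)
 *		ena = spa->spa_ena;		(SPA-wide, pool load)
 *	else if (zio != NULL && zio->io_logical != NULL)
 *		ena = zio->io_logical->io_ena;	(shared by the zio chain)
 *	else
 *		ena = fm_ena_generate(0, FM_ENA_FMT1);	(unique)
 */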
#ifdef _KERNEL
/*
* Duplicate ereport Detection
*
* Some ereports are retained momentarily for detecting duplicates. These
* are kept in a recent_events_node_t in both a time-ordered list and an AVL
* tree of recent unique ereports.
*
* The lifespan of these recent ereports is bounded (15 mins) and a cleaner
* task is used to purge stale entries.
*/
static list_t recent_events_list;
static avl_tree_t recent_events_tree;
static kmutex_t recent_events_lock;
static taskqid_t recent_events_cleaner_tqid;
/*
* Each node is about 128 bytes so 2,000 would consume 1/4 MiB.
*
* This setting can be changed dynamically and setting it to zero
* disables duplicate detection.
*/
static unsigned int zfs_zevent_retain_max = 2000;
/*
* The lifespan for a recent ereport entry. The default of 15 minutes is
* intended to outlive the zfs diagnosis engine's threshold of 10 errors
* over a period of 10 minutes.
*/
static unsigned int zfs_zevent_retain_expire_secs = 900;
typedef enum zfs_subclass {
ZSC_IO,
ZSC_DATA,
ZSC_CHECKSUM
} zfs_subclass_t;
typedef struct {
/* common criteria */
uint64_t re_pool_guid;
uint64_t re_vdev_guid;
int re_io_error;
uint64_t re_io_size;
uint64_t re_io_offset;
zfs_subclass_t re_subclass;
zio_priority_t re_io_priority;
/* logical zio criteria (optional) */
zbookmark_phys_t re_io_bookmark;
/* internal state */
avl_node_t re_tree_link;
list_node_t re_list_link;
uint64_t re_timestamp;
} recent_events_node_t;
static int
recent_events_compare(const void *a, const void *b)
{
const recent_events_node_t *node1 = a;
const recent_events_node_t *node2 = b;
int cmp;
/*
* The comparison order here is somewhat arbitrary.
* What's important is that if every criterion matches, then it
* is a duplicate (i.e. the compare returns 0).
*/
if ((cmp = TREE_CMP(node1->re_subclass, node2->re_subclass)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_pool_guid, node2->re_pool_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_vdev_guid, node2->re_vdev_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_error, node2->re_io_error)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_priority, node2->re_io_priority)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_size, node2->re_io_size)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_offset, node2->re_io_offset)) != 0)
return (cmp);
const zbookmark_phys_t *zb1 = &node1->re_io_bookmark;
const zbookmark_phys_t *zb2 = &node2->re_io_bookmark;
if ((cmp = TREE_CMP(zb1->zb_objset, zb2->zb_objset)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_object, zb2->zb_object)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_level, zb2->zb_level)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_blkid, zb2->zb_blkid)) != 0)
return (cmp);
return (0);
}
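/*
 * Editor's note: TREE_CMP() evaluates to -1, 0, or 1, so the chain above
 * implements an ordinary lexicographic comparator. A minimal sketch of
 * the contract, assuming two nodes that differ only in offset:
 *
 *	recent_events_node_t a = { .re_io_offset = 4096 };
 *	recent_events_node_t b = { .re_io_offset = 8192 };
 *
 *	recent_events_compare(&a, &b) == -1	(a sorts first)
 *	recent_events_compare(&a, &a) == 0	(a duplicate)
 *
 * avl_find() in zfs_ereport_is_duplicate() relies on the zero case to
 * detect duplicate ereports.
 */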
/*
* workaround: vdev properties don't have inheritance
*/
static uint64_t
vdev_prop_get_inherited(vdev_t *vd, vdev_prop_t prop)
{
uint64_t propdef, propval;
propdef = vdev_prop_default_numeric(prop);
switch (prop) {
case VDEV_PROP_CHECKSUM_N:
propval = vd->vdev_checksum_n;
break;
case VDEV_PROP_CHECKSUM_T:
propval = vd->vdev_checksum_t;
break;
case VDEV_PROP_IO_N:
propval = vd->vdev_io_n;
break;
case VDEV_PROP_IO_T:
propval = vd->vdev_io_t;
break;
default:
propval = propdef;
break;
}
if (propval != propdef)
return (propval);
if (vd->vdev_parent == NULL)
return (propdef);
return (vdev_prop_get_inherited(vd->vdev_parent, prop));
}
static void zfs_ereport_schedule_cleaner(void);
/*
* background task to clean stale recent event nodes.
*/
static void
zfs_ereport_cleaner(void *arg)
{
recent_events_node_t *entry;
uint64_t now = gethrtime();
/*
* purge expired entries
*/
mutex_enter(&recent_events_lock);
while ((entry = list_tail(&recent_events_list)) != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
if (age <= zfs_zevent_retain_expire_secs)
break;
/* remove expired node */
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
/* Restart the cleaner if more entries remain */
recent_events_cleaner_tqid = 0;
if (!list_is_empty(&recent_events_list))
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
}
static void
zfs_ereport_schedule_cleaner(void)
{
ASSERT(MUTEX_HELD(&recent_events_lock));
uint64_t timeout = SEC2NSEC(zfs_zevent_retain_expire_secs + 1);
recent_events_cleaner_tqid = taskq_dispatch_delay(
system_delay_taskq, zfs_ereport_cleaner, NULL, TQ_SLEEP,
ddi_get_lbolt() + NSEC_TO_TICK(timeout));
}
/*
* Clear entries for a given vdev or all vdevs in a pool when vdev == NULL
*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
uint64_t vdev_guid, pool_guid;
ASSERT(vd != NULL || spa != NULL);
if (vd == NULL) {
vdev_guid = 0;
pool_guid = spa_guid(spa);
} else {
vdev_guid = vd->vdev_guid;
pool_guid = 0;
}
mutex_enter(&recent_events_lock);
recent_events_node_t *next = list_head(&recent_events_list);
while (next != NULL) {
recent_events_node_t *entry = next;
next = list_next(&recent_events_list, next);
if (entry->re_vdev_guid == vdev_guid ||
entry->re_pool_guid == pool_guid) {
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
}
mutex_exit(&recent_events_lock);
}
/*
* Check if an ereport would be a duplicate of one recently posted.
*
* An ereport is considered a duplicate if the set of criteria in
* recent_events_node_t all match.
*
* Only FM_EREPORT_ZFS_IO, FM_EREPORT_ZFS_DATA, and FM_EREPORT_ZFS_CHECKSUM
* are candidates for duplicate checking.
*/
static boolean_t
zfs_ereport_is_duplicate(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t offset, uint64_t size)
{
recent_events_node_t search = {0}, *entry;
if (vd == NULL || zio == NULL)
return (B_FALSE);
if (zfs_zevent_retain_max == 0)
return (B_FALSE);
if (strcmp(subclass, FM_EREPORT_ZFS_IO) == 0)
search.re_subclass = ZSC_IO;
else if (strcmp(subclass, FM_EREPORT_ZFS_DATA) == 0)
search.re_subclass = ZSC_DATA;
else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0)
search.re_subclass = ZSC_CHECKSUM;
else
return (B_FALSE);
search.re_pool_guid = spa_guid(spa);
search.re_vdev_guid = vd->vdev_guid;
search.re_io_error = zio->io_error;
search.re_io_priority = zio->io_priority;
/* if size is supplied use it over what's in zio */
if (size) {
search.re_io_size = size;
search.re_io_offset = offset;
} else {
search.re_io_size = zio->io_size;
search.re_io_offset = zio->io_offset;
}
/* grab optional logical zio criteria */
if (zb != NULL) {
search.re_io_bookmark.zb_objset = zb->zb_objset;
search.re_io_bookmark.zb_object = zb->zb_object;
search.re_io_bookmark.zb_level = zb->zb_level;
search.re_io_bookmark.zb_blkid = zb->zb_blkid;
}
uint64_t now = gethrtime();
mutex_enter(&recent_events_lock);
/* check if we have seen this one recently */
entry = avl_find(&recent_events_tree, &search, NULL);
if (entry != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
/*
* There is still an active cleaner (since we're here).
* Reset the last seen time for this duplicate entry
* so that its lifespan gets extended.
*/
list_remove(&recent_events_list, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
zfs_zevent_track_duplicate();
mutex_exit(&recent_events_lock);
return (age <= zfs_zevent_retain_expire_secs);
}
if (avl_numnodes(&recent_events_tree) >= zfs_zevent_retain_max) {
/* recycle oldest node */
entry = list_tail(&recent_events_list);
ASSERT(entry != NULL);
list_remove(&recent_events_list, entry);
avl_remove(&recent_events_tree, entry);
} else {
entry = kmem_alloc(sizeof (recent_events_node_t), KM_SLEEP);
}
/* record this as a recent ereport */
*entry = search;
avl_add(&recent_events_tree, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
/* Start a cleaner if not already scheduled */
if (recent_events_cleaner_tqid == 0)
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
return (B_FALSE);
}
void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
if (nvl)
fm_nvlist_destroy(nvl, FM_NVA_FREE);
if (detector)
fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/*
* We want to rate limit ZIO delay, deadman, and checksum events so as to not
* flood zevent consumers when a disk is acting up.
*
* Returns 1 if we're ratelimiting, 0 if not.
*/
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
int rc = 0;
/*
* zfs_ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
* are. Invert it to get our return value.
*/
if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
rc = !zfs_ratelimit(&vd->vdev_delay_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_DEADMAN) == 0) {
rc = !zfs_ratelimit(&vd->vdev_deadman_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
}
if (rc) {
/* We're rate limiting */
fm_erpt_dropped_increment();
}
return (rc);
}
/*
* Return B_TRUE if the event actually posted, B_FALSE if not.
*/
static boolean_t
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
const char *subclass, spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
zio_t *zio, uint64_t stateoroffset, uint64_t size)
{
nvlist_t *ereport, *detector;
uint64_t ena;
char class[64];
if ((ereport = fm_nvlist_create(NULL)) == NULL)
return (B_FALSE);
if ((detector = fm_nvlist_create(NULL)) == NULL) {
fm_nvlist_destroy(ereport, FM_NVA_FREE);
return (B_FALSE);
}
/*
* Serialize ereport generation
*/
mutex_enter(&spa->spa_errlist_lock);
/*
* Determine the ENA to use for this event. If we are in a loading
* state, use a SPA-wide ENA. Otherwise, if we are in an I/O state, use
* a root zio-wide ENA. Otherwise, simply use a unique ENA.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE) {
if (spa->spa_ena == 0)
spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
ena = spa->spa_ena;
} else if (zio != NULL && zio->io_logical != NULL) {
if (zio->io_logical->io_ena == 0)
zio->io_logical->io_ena =
fm_ena_generate(0, FM_ENA_FMT1);
ena = zio->io_logical->io_ena;
} else {
ena = fm_ena_generate(0, FM_ENA_FMT1);
}
/*
* Construct the full class, detector, and other standard FMA fields.
*/
(void) snprintf(class, sizeof (class), "%s.%s",
ZFS_ERROR_CLASS, subclass);
fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
vd != NULL ? vd->vdev_guid : 0);
fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);
/*
* Construct the per-ereport payload, depending on which parameters are
* passed in.
*/
/*
* Generic payload members common to all ereports.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_POOL, DATA_TYPE_STRING, spa_name(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, DATA_TYPE_UINT64, spa_guid(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, DATA_TYPE_UINT64,
(uint64_t)spa_state(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
(int32_t)spa_load_state(spa), NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
DATA_TYPE_STRING,
spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
FM_EREPORT_FAILMODE_WAIT :
spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
NULL);
if (vd != NULL) {
vdev_t *pvd = vd->vdev_parent;
vdev_queue_t *vq = &vd->vdev_queue;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_t *spare_vd;
uint64_t *spare_guids;
char **spare_paths;
int i, spare_count;
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
DATA_TYPE_UINT64, vd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
if (vd->vdev_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
DATA_TYPE_STRING, vd->vdev_path, NULL);
if (vd->vdev_devid != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
DATA_TYPE_STRING, vd->vdev_devid, NULL);
if (vd->vdev_fru != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
DATA_TYPE_STRING, vd->vdev_fru, NULL);
if (vd->vdev_enc_sysfs_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);
if (vd->vdev_ashift)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
DATA_TYPE_UINT64, vd->vdev_ashift, NULL);
if (vq != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
}
if (vs != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
DATA_TYPE_UINT64, vs->vs_read_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
DATA_TYPE_UINT64, vs->vs_write_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
DATA_TYPE_UINT64, vs->vs_checksum_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS,
DATA_TYPE_UINT64, vs->vs_slow_ios,
NULL);
}
if (pvd != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
DATA_TYPE_UINT64, pvd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
NULL);
if (pvd->vdev_path)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
DATA_TYPE_STRING, pvd->vdev_path, NULL);
if (pvd->vdev_devid)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
DATA_TYPE_STRING, pvd->vdev_devid, NULL);
}
spare_count = spa->spa_spares.sav_count;
spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
KM_SLEEP);
spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
KM_SLEEP);
for (i = 0; i < spare_count; i++) {
spare_vd = spa->spa_spares.sav_vdevs[i];
if (spare_vd) {
spare_paths[i] = spare_vd->vdev_path;
spare_guids[i] = spare_vd->vdev_guid;
}
}
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);
kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
kmem_free(spare_paths, sizeof (char *) * spare_count);
}
if (zio != NULL) {
/*
* Payload common to all I/Os.
*/
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
DATA_TYPE_INT32, zio->io_error, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
DATA_TYPE_INT32, zio->io_flags, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
DATA_TYPE_UINT32, zio->io_stage, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
DATA_TYPE_UINT32, zio->io_pipeline, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
DATA_TYPE_UINT64, zio->io_delay, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
DATA_TYPE_UINT64, zio->io_timestamp, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
DATA_TYPE_UINT64, zio->io_delta, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY,
DATA_TYPE_UINT32, zio->io_priority, NULL);
/*
* If the 'size' parameter is non-zero, it indicates this is a
* RAID-Z or other I/O where the physical offset and length are
* provided for us, instead of within the zio_t.
*/
if (vd != NULL) {
if (size)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, stateoroffset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, size, NULL);
else
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, zio->io_offset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, zio->io_size, NULL);
}
} else if (vd != NULL) {
/*
* If we have a vdev but no zio, this is a device fault, and the
* 'stateoroffset' parameter indicates the previous state of the
* vdev.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
DATA_TYPE_UINT64, stateoroffset, NULL);
}
/*
* Payload for I/Os with corresponding logical information.
*/
if (zb != NULL && (zio == NULL || zio->io_logical != NULL)) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
DATA_TYPE_UINT64, zb->zb_objset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
DATA_TYPE_UINT64, zb->zb_object,
FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
DATA_TYPE_INT64, zb->zb_level,
FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
DATA_TYPE_UINT64, zb->zb_blkid, NULL);
}
/*
* Payload for tuning the zed
*/
if (vd != NULL && strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
uint64_t cksum_n, cksum_t;
cksum_n = vdev_prop_get_inherited(vd, VDEV_PROP_CHECKSUM_N);
if (cksum_n != vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_N,
DATA_TYPE_UINT64,
cksum_n,
NULL);
cksum_t = vdev_prop_get_inherited(vd, VDEV_PROP_CHECKSUM_T);
if (cksum_t != vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_T,
DATA_TYPE_UINT64,
cksum_t,
NULL);
}
if (vd != NULL && strcmp(subclass, FM_EREPORT_ZFS_IO) == 0) {
uint64_t io_n, io_t;
io_n = vdev_prop_get_inherited(vd, VDEV_PROP_IO_N);
if (io_n != vdev_prop_default_numeric(VDEV_PROP_IO_N))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_N,
DATA_TYPE_UINT64,
io_n,
NULL);
io_t = vdev_prop_get_inherited(vd, VDEV_PROP_IO_T);
if (io_t != vdev_prop_default_numeric(VDEV_PROP_IO_T))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_T,
DATA_TYPE_UINT64,
io_t,
NULL);
}
mutex_exit(&spa->spa_errlist_lock);
*ereport_out = ereport;
*detector_out = detector;
return (B_TRUE);
}
/* if it's <= 128 bytes, save the corruption directly */
#define ZFM_MAX_INLINE (128 / sizeof (uint64_t))
#define MAX_RANGES 16
typedef struct zfs_ecksum_info {
- /* histograms of set and cleared bits by bit number in a 64-bit word */
- uint8_t zei_histogram_set[sizeof (uint64_t) * NBBY];
- uint8_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
-
/* inline arrays of bits set and cleared. */
uint64_t zei_bits_set[ZFM_MAX_INLINE];
uint64_t zei_bits_cleared[ZFM_MAX_INLINE];
/*
* for each range, the number of bits set and cleared. The Hamming
* distance between the good and bad buffers is the sum of them all.
*/
uint32_t zei_range_sets[MAX_RANGES];
uint32_t zei_range_clears[MAX_RANGES];
struct zei_ranges {
uint32_t zr_start;
uint32_t zr_end;
} zei_ranges[MAX_RANGES];
size_t zei_range_count;
uint32_t zei_mingap;
uint32_t zei_allowed_mingap;
} zfs_ecksum_info_t;
static void
-update_histogram(uint64_t value_arg, uint8_t *hist, uint32_t *count)
+update_bad_bits(uint64_t value_arg, uint32_t *count)
{
size_t i;
size_t bits = 0;
uint64_t value = BE_64(value_arg);
/* We store the bits in big-endian (largest-first) order */
for (i = 0; i < 64; i++) {
- if (value & (1ull << i)) {
- hist[63 - i]++;
+ if (value & (1ull << i))
++bits;
- }
}
/* update the count of bits changed */
*count += bits;
}
/*
* We've now filled up the range array, and need to increase "mingap" and
* shrink the range list accordingly. zei_mingap is always the smallest
* distance between array entries, so we set the new_allowed_gap to be
* one greater than that. We then go through the list, joining together
* any ranges which are closer than the new_allowed_gap.
*
* By construction, at least one pair of ranges will be joined. We also
* update zei_mingap to the new smallest gap, to prepare for our next
* invocation.
*/
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
uint32_t mingap = UINT32_MAX;
uint32_t new_allowed_gap = eip->zei_mingap + 1;
size_t idx, output;
size_t max = eip->zei_range_count;
struct zei_ranges *r = eip->zei_ranges;
ASSERT3U(eip->zei_range_count, >, 0);
ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);
output = idx = 0;
while (idx < max - 1) {
uint32_t start = r[idx].zr_start;
uint32_t end = r[idx].zr_end;
while (idx < max - 1) {
idx++;
uint32_t nstart = r[idx].zr_start;
uint32_t nend = r[idx].zr_end;
uint32_t gap = nstart - end;
if (gap < new_allowed_gap) {
end = nend;
continue;
}
if (gap < mingap)
mingap = gap;
break;
}
r[output].zr_start = start;
r[output].zr_end = end;
output++;
}
ASSERT3U(output, <, eip->zei_range_count);
eip->zei_range_count = output;
eip->zei_mingap = mingap;
eip->zei_allowed_mingap = new_allowed_gap;
}
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
if (count >= MAX_RANGES) {
zei_shrink_ranges(eip);
count = eip->zei_range_count;
}
if (count == 0) {
eip->zei_mingap = UINT32_MAX;
eip->zei_allowed_mingap = 1;
} else {
int gap = start - r[count - 1].zr_end;
if (gap < eip->zei_allowed_mingap) {
r[count - 1].zr_end = end;
return;
}
if (gap < eip->zei_mingap)
eip->zei_mingap = gap;
}
r[count].zr_start = start;
r[count].zr_end = end;
eip->zei_range_count++;
}
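/*
 * Editor's note: a short illustration of the coalescing above, assuming
 * zei_allowed_mingap == 2 and one existing range [0, 4):
 *
 *	zei_add_range(eip, 5, 7);	gap == 1 < 2, so the ranges merge
 *					and zei_ranges[0] becomes [0, 7)
 *	zei_add_range(eip, 20, 22);	gap == 13, so a new range is
 *					appended and zei_mingap drops to
 *					13 if it was larger
 *
 * Once MAX_RANGES entries accumulate, zei_shrink_ranges() raises the
 * allowed gap and re-merges, trading resolution for bounded memory.
 */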
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
size_t result = 0;
size_t idx;
for (idx = 0; idx < count; idx++)
result += (r[idx].zr_end - r[idx].zr_start);
return (result);
}
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
const abd_t *goodabd, const abd_t *badabd, size_t size,
boolean_t drop_if_identical)
{
const uint64_t *good;
const uint64_t *bad;
size_t nui64s = size / sizeof (uint64_t);
size_t inline_size;
int no_inline = 0;
size_t idx;
size_t range;
size_t offset = 0;
ssize_t start = -1;
zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);
/* don't do any annotation for injected checksum errors */
if (info != NULL && info->zbc_injected)
return (eip);
if (info != NULL && info->zbc_has_cksum) {
fm_payload_set(ereport,
- FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
- DATA_TYPE_UINT64_ARRAY,
- sizeof (info->zbc_expected) / sizeof (uint64_t),
- (uint64_t *)&info->zbc_expected,
- FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
- DATA_TYPE_UINT64_ARRAY,
- sizeof (info->zbc_actual) / sizeof (uint64_t),
- (uint64_t *)&info->zbc_actual,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
DATA_TYPE_STRING,
info->zbc_checksum_name,
NULL);
if (info->zbc_byteswapped) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
DATA_TYPE_BOOLEAN, 1,
NULL);
}
}
if (badabd == NULL || goodabd == NULL)
return (eip);
ASSERT3U(nui64s, <=, UINT32_MAX);
ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, <=, UINT32_MAX);
good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);
/* build up the range list by comparing the two buffers. */
for (idx = 0; idx < nui64s; idx++) {
if (good[idx] == bad[idx]) {
if (start == -1)
continue;
zei_add_range(eip, start, idx);
start = -1;
} else {
if (start != -1)
continue;
start = idx;
}
}
if (start != -1)
zei_add_range(eip, start, idx);
/* See if it will fit in our inline buffers */
inline_size = zei_range_total_size(eip);
if (inline_size > ZFM_MAX_INLINE)
no_inline = 1;
/*
* If there is no change and we want to drop if the buffers are
* identical, do so.
*/
if (inline_size == 0 && drop_if_identical) {
kmem_free(eip, sizeof (*eip));
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
return (NULL);
}
/*
* Now walk through the ranges, filling in the details of the
* differences. Also convert our uint64_t-array offsets to byte
* offsets.
*/
for (range = 0; range < eip->zei_range_count; range++) {
size_t start = eip->zei_ranges[range].zr_start;
size_t end = eip->zei_ranges[range].zr_end;
for (idx = start; idx < end; idx++) {
uint64_t set, cleared;
// bits set in bad, but not in good
set = ((~good[idx]) & bad[idx]);
// bits set in good, but not in bad
cleared = (good[idx] & (~bad[idx]));
if (!no_inline) {
ASSERT3U(offset, <, inline_size);
eip->zei_bits_set[offset] = set;
eip->zei_bits_cleared[offset] = cleared;
offset++;
}
- update_histogram(set, eip->zei_histogram_set,
- &eip->zei_range_sets[range]);
- update_histogram(cleared, eip->zei_histogram_cleared,
- &eip->zei_range_clears[range]);
+ update_bad_bits(set, &eip->zei_range_sets[range]);
+ update_bad_bits(cleared, &eip->zei_range_clears[range]);
}
/* convert to byte offsets */
eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
}
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
eip->zei_allowed_mingap *= sizeof (uint64_t);
inline_size *= sizeof (uint64_t);
/* fill in ereport */
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
(uint32_t *)eip->zei_ranges,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
DATA_TYPE_UINT32, eip->zei_allowed_mingap,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
NULL);
if (!no_inline) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_cleared,
NULL);
- } else {
- fm_payload_set(ereport,
- FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
- DATA_TYPE_UINT8_ARRAY,
- NBBY * sizeof (uint64_t), eip->zei_histogram_set,
- FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
- DATA_TYPE_UINT8_ARRAY,
- NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
- NULL);
}
return (eip);
}
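/*
 * Editor's note: a consumer-side sketch of the payload produced above,
 * with hypothetical variable names. The Hamming distance between the
 * good and bad buffers can be recovered from the ereport by summing the
 * per-range counters:
 *
 *	uint32_t hamming = 0;
 *	for (size_t r = 0; r < range_count; r++)
 *		hamming += range_sets[r] + range_clears[r];
 *
 * where range_sets/range_clears are the
 * FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS/_CLEARS uint32 arrays.
 */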
#else
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
(void) spa, (void) vd;
}
#endif
/*
* Make sure our event is still valid for the given zio/vdev/pool. For example,
* we don't want to keep logging events for a faulted or missing vdev.
*/
boolean_t
zfs_ereport_is_valid(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio)
{
#ifdef _KERNEL
/*
* If we are doing a spa_tryimport() or in recovery mode,
* ignore errors.
*/
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER)
return (B_FALSE);
/*
* If we are in the middle of opening a pool, and the previous attempt
* failed, don't bother logging any new ereports - we're just going to
* get the same diagnosis anyway.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE &&
spa->spa_last_open_failed)
return (B_FALSE);
if (zio != NULL) {
/*
* If this is not a read or write zio, ignore the error. This
* can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
*/
if (zio->io_type != ZIO_TYPE_READ &&
zio->io_type != ZIO_TYPE_WRITE)
return (B_FALSE);
if (vd != NULL) {
/*
* If the vdev has already been marked as failing due
* to a failed probe, then ignore any subsequent I/O
* errors, as the DE will automatically fault the vdev
* on the first such failure. This also catches cases
* where vdev_remove_wanted is set and the device has
* not yet been asynchronously placed into the REMOVED
* state.
*/
if (zio->io_vd == vd && !vdev_accessible(vd, zio))
return (B_FALSE);
/*
* Ignore checksum errors for reads from DTL regions of
* leaf vdevs.
*/
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_error == ECKSUM &&
vd->vdev_ops->vdev_op_leaf &&
vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
return (B_FALSE);
}
}
/*
* For probe failure, we want to avoid posting ereports if we've
* already removed the device in the meantime.
*/
if (vd != NULL &&
strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
(vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
return (B_FALSE);
/* Ignore bogus delay events (like from ioctls or unqueued IOs) */
if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
(zio != NULL) && (!zio->io_timestamp)) {
return (B_FALSE);
}
#else
(void) subclass, (void) spa, (void) vd, (void) zio;
#endif
return (B_TRUE);
}
/*
* Post an ereport for the given subclass
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
if (!zfs_ereport_is_valid(subclass, spa, vd, zio))
return (EINVAL);
if (zfs_ereport_is_duplicate(subclass, spa, vd, zb, zio, 0, 0))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(subclass, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, subclass, spa, vd,
zb, zio, state, 0))
return (SET_ERROR(EINVAL)); /* couldn't post event */
if (ereport == NULL)
return (SET_ERROR(EINVAL));
/* Cleanup is handled by the callback function */
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#else
(void) subclass, (void) spa, (void) vd, (void) zb, (void) zio,
(void) state;
#endif
return (rc);
}
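/*
 * Editor's note: a hypothetical caller, showing how the documented
 * return values might be distinguished; most in-tree callers simply
 * cast the result to void:
 *
 *	int err = zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd,
 *	    &zio->io_bookmark, zio, 0);
 *	if (err == EALREADY)
 *		...	(duplicate of a recent ereport, suppressed)
 *	else if (err == EBUSY)
 *		...	(rate limited; counted as a dropped ereport)
 */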
/*
* Prepare a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length, zio_bad_cksum_t *info)
{
zio_cksum_report_t *report;
#ifdef _KERNEL
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
#else
(void) zb, (void) offset;
#endif
report = kmem_zalloc(sizeof (*report), KM_SLEEP);
zio_vsd_default_cksum_report(zio, report);
/* copy the checksum failure information if it was provided */
if (info != NULL) {
report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
memcpy(report->zcr_ckinfo, info, sizeof (*info));
}
report->zcr_sector = 1ULL << vd->vdev_top->vdev_ashift;
report->zcr_align =
vdev_psize_to_asize(vd->vdev_top, report->zcr_sector);
report->zcr_length = length;
#ifdef _KERNEL
(void) zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio, offset, length);
if (report->zcr_ereport == NULL) {
zfs_ereport_free_checksum(report);
return (0);
}
#endif
mutex_enter(&spa->spa_errlist_lock);
report->zcr_next = zio->io_logical->io_cksum_report;
zio->io_logical->io_cksum_report = report;
mutex_exit(&spa->spa_errlist_lock);
return (0);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
const abd_t *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
zfs_ecksum_info_t *info;
info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
good_data, bad_data, report->zcr_length, drop_if_identical);
if (info != NULL)
zfs_zevent_post(report->zcr_ereport,
report->zcr_detector, zfs_zevent_post_cb);
else
zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);
report->zcr_ereport = report->zcr_detector = NULL;
if (info != NULL)
kmem_free(info, sizeof (*info));
#else
(void) report, (void) good_data, (void) bad_data,
(void) drop_if_identical;
#endif
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
if (rpt->zcr_ereport != NULL) {
fm_nvlist_destroy(rpt->zcr_ereport,
FM_NVA_FREE);
fm_nvlist_destroy(rpt->zcr_detector,
FM_NVA_FREE);
}
#endif
rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);
if (rpt->zcr_ckinfo != NULL)
kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));
kmem_free(rpt, sizeof (*rpt));
}
/*
* Post a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length,
const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
zfs_ecksum_info_t *info;
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, FM_EREPORT_ZFS_CHECKSUM,
spa, vd, zb, zio, offset, length) || (ereport == NULL)) {
return (SET_ERROR(EINVAL));
}
info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
B_FALSE);
if (info != NULL) {
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
kmem_free(info, sizeof (*info));
}
#else
(void) spa, (void) vd, (void) zb, (void) zio, (void) offset,
(void) length, (void) good_data, (void) bad_data, (void) zbc;
#endif
return (rc);
}
/*
* The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
* change in the pool. All sysevents are listed in sys/sysevent/eventdefs.h
* and are designed to be consumed by the ZFS Event Daemon (ZED). For
* additional details refer to the zed(8) man page.
*/
nvlist_t *
zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
nvlist_t *resource = NULL;
#ifdef _KERNEL
char class[64];
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
return (NULL);
if ((resource = fm_nvlist_create(NULL)) == NULL)
return (NULL);
(void) snprintf(class, sizeof (class), "%s.%s.%s", type,
ZFS_ERROR_CLASS, name);
VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, spa_state(spa)));
VERIFY0(nvlist_add_int32(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));
if (vd) {
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
if (vd->vdev_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
if (vd->vdev_devid != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
if (vd->vdev_fru != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
if (vd->vdev_enc_sysfs_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path));
}
/* also copy any optional payload data */
if (aux) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
(void) nvlist_add_nvpair(resource, elem);
}
#else
(void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
return (resource);
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, type, name, aux);
if (resource)
zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#else
(void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
}
/*
* The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
* has been removed from the system. This will cause the DE to ignore any
* recent I/O errors, inferring that they are due to the asynchronous device
* removal.
*/
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}
/*
* The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
* has the 'autoreplace' property set, and therefore any broken vdevs will be
* handled by higher level logic, and no vdev fault should be generated.
*/
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}
/*
* The 'resource.fs.zfs.statechange' event is an internal signal that the
* given vdev has transitioned its state to DEGRADED or HEALTHY. This will
* cause the retire agent to repair any outstanding fault management cases
* open because the device was not found (fault.fs.zfs.device).
*/
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
#ifdef _KERNEL
nvlist_t *aux;
/*
* Add optional supplemental keys to payload
*/
aux = fm_nvlist_create(NULL);
if (vd && aux) {
if (vd->vdev_physpath) {
fnvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
vd->vdev_physpath);
}
if (vd->vdev_enc_sysfs_path) {
fnvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
}
fnvlist_add_uint64(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
}
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
aux);
if (aux)
fm_nvlist_destroy(aux, FM_NVA_FREE);
#else
(void) spa, (void) vd, (void) laststate;
#endif
}
#ifdef _KERNEL
void
zfs_ereport_init(void)
{
mutex_init(&recent_events_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&recent_events_list, sizeof (recent_events_node_t),
offsetof(recent_events_node_t, re_list_link));
avl_create(&recent_events_tree, recent_events_compare,
sizeof (recent_events_node_t), offsetof(recent_events_node_t,
re_tree_link));
}
/*
* This 'early' fini needs to run before zfs_fini() which on Linux waits
* for the system_delay_taskq to drain.
*/
void
zfs_ereport_taskq_fini(void)
{
mutex_enter(&recent_events_lock);
if (recent_events_cleaner_tqid != 0) {
taskq_cancel_id(system_delay_taskq, recent_events_cleaner_tqid);
recent_events_cleaner_tqid = 0;
}
mutex_exit(&recent_events_lock);
}
void
zfs_ereport_fini(void)
{
recent_events_node_t *entry;
while ((entry = list_remove_head(&recent_events_list)) != NULL) {
avl_remove(&recent_events_tree, entry);
kmem_free(entry, sizeof (*entry));
}
avl_destroy(&recent_events_tree);
list_destroy(&recent_events_list);
mutex_destroy(&recent_events_lock);
}
void
zfs_ereport_snapshot_post(const char *subclass, spa_t *spa, const char *name)
{
nvlist_t *aux;
aux = fm_nvlist_create(NULL);
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME, name);
zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
fm_nvlist_destroy(aux, FM_NVA_FREE);
}
/*
* Post an event when a zvol is created or removed
*
* This is currently only used by macOS, since it uses the event to create
* symlinks between the volume name (mypool/myvol) and the actual /dev
* device (/dev/disk3). For example:
*
* /var/run/zfs/dsk/mypool/myvol -> /dev/disk3
*
* name: The full name of the zvol ("mypool/myvol")
* dev_name: The full /dev name for the zvol ("/dev/disk3")
* raw_name: The raw /dev name for the zvol ("/dev/rdisk3")
*/
void
zfs_ereport_zvol_post(const char *subclass, const char *name,
const char *dev_name, const char *raw_name)
{
nvlist_t *aux;
char *r;
boolean_t locked = mutex_owned(&spa_namespace_lock);
if (!locked) mutex_enter(&spa_namespace_lock);
spa_t *spa = spa_lookup(name);
if (!locked) mutex_exit(&spa_namespace_lock);
if (spa == NULL)
return;
aux = fm_nvlist_create(NULL);
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME, dev_name);
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME,
raw_name);
r = strchr(name, '/');
if (r && r[1])
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_VOLUME, &r[1]);
zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
fm_nvlist_destroy(aux, FM_NVA_FREE);
}
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_is_valid);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_max, UINT, ZMOD_RW,
"Maximum recent zevents records to retain for duplicate checking");
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_expire_secs, UINT, ZMOD_RW,
"Expiration time for recent zevents records");
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
index 7bdcc1639384..f8d13075d5c0 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
@@ -1,1460 +1,1466 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfeature.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
static ulong_t zfs_fsync_sync_cnt = 4;
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
int error = 0;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
(void) tsd_set(zfs_fsyncer_key, (void *)(uintptr_t)zfs_fsync_sync_cnt);
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
goto out;
atomic_inc_32(&zp->z_sync_writes_cnt);
zil_commit(zfsvfs->z_log, zp->z_id);
atomic_dec_32(&zp->z_sync_writes_cnt);
zfs_exit(zfsvfs, FTAG);
}
out:
tsd_set(zfs_fsyncer_key, NULL);
return (error);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
* Lseek support for finding holes (cmd == SEEK_HOLE) and
* data (cmd == SEEK_DATA). "off" is an in/out parameter.
*/
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfs_locked_range_t *lr;
uint64_t noff = (uint64_t)*off; /* new offset */
uint64_t file_sz;
int error;
boolean_t hole;
file_sz = zp->z_size;
if (noff >= file_sz) {
return (SET_ERROR(ENXIO));
}
if (cmd == F_SEEK_HOLE)
hole = B_TRUE;
else
hole = B_FALSE;
/* Flush any mmap()'d data to disk */
if (zn_has_cached_data(zp, 0, file_sz - 1))
zn_flush_cached_data(zp, B_FALSE);
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
zfs_rangelock_exit(lr);
if (error == ESRCH)
return (SET_ERROR(ENXIO));
/* File was dirty, so fall back to using generic logic */
if (error == EBUSY) {
if (hole)
*off = file_sz;
return (0);
}
/*
* We could find a hole that begins after the logical end-of-file,
* because dmu_offset_next() only works on whole blocks. If the
* EOF falls mid-block, then indicate that the "virtual hole"
* at the end of the file begins at the logical EOF, rather than
* at the end of the last block.
*/
if (noff > file_sz) {
ASSERT(hole);
noff = file_sz;
}
if (noff < *off)
return (error);
*off = noff;
return (error);
}
int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = zfs_holey_common(zp, cmd, off);
zfs_exit(zfsvfs, FTAG);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
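/*
 * Editor's note: a minimal userland sketch of the interface implemented
 * above, assuming an open descriptor fd on a platform whose lseek(2)
 * supports SEEK_HOLE/SEEK_DATA:
 *
 *	#include <unistd.h>
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *
 * On a fully dense file, zfs_holey_common() reports the "virtual hole"
 * at the logical EOF, so hole equals the file size; seeking past EOF
 * yields ENXIO, matching the code above.
 */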
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (flag & V_ACE_MASK)
#if defined(__linux__)
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
zfs_init_idmap);
#else
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
NULL);
#endif
else
#if defined(__linux__)
error = zfs_zaccess_rwx(zp, mode, flag, cr, zfs_init_idmap);
#else
error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
#endif
zfs_exit(zfsvfs, FTAG);
return (error);
}
static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
/*
* Read bytes from specified file into supplied buffer.
*
* IN: zp - inode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - O_SYNC flags; used to provide FRSYNC semantics.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range, buffer filled.
*
* RETURN: 0 on success, error code on failure.
*
* Side Effects:
* inode - atime updated if byte count > 0
*/
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
(void) cr;
int error = 0;
boolean_t frsync = B_FALSE;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
/* We don't copy out anything useful for directories. */
if (Z_ISDIR(ZTOTYPE(zp))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
/*
* Validate file offset
*/
if (zfs_uio_offset(uio) < (offset_t)0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Fasttrack empty reads
*/
if (zfs_uio_resid(uio) == 0) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
#ifdef FRSYNC
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
* Only do this for non-snapshots.
*
* Some platforms do not support FRSYNC and instead map it
* to O_SYNC, which results in unnecessary calls to zil_commit. We
* only honor FRSYNC requests on platforms which support it.
*/
frsync = !!(ioflag & FRSYNC);
#endif
if (zfsvfs->z_log &&
(frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zfsvfs->z_log, zp->z_id);
/*
* Lock the range against changes.
*/
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
/*
* If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime.
*/
if (zfs_uio_offset(uio) >= zp->z_size) {
error = 0;
goto out;
}
ASSERT(zfs_uio_offset(uio) < zp->z_size);
#if defined(__linux__)
ssize_t start_offset = zfs_uio_offset(uio);
#endif
ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
ssize_t start_resid = n;
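/*
 * Read in chunks of at most zfs_vnops_read_chunk_size bytes. The
 * P2PHASE() term aligns subsequent chunks to chunk-size boundaries,
 * so no single DMU request exceeds the tunable chunk size.
 */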
while (n > 0) {
ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
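/*
 * Prefer mappedread() when the page cache holds pages for this
 * range, so the read observes data written through mmap() that
 * has not yet been synced into the DMU.
 */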
#ifdef UIO_NOCOPY
if (zfs_uio_segflg(uio) == UIO_NOCOPY)
error = mappedread_sf(zp, nbytes, uio);
else
#endif
if (zn_has_cached_data(zp, zfs_uio_offset(uio),
zfs_uio_offset(uio) + nbytes - 1) && !(ioflag & O_DIRECT)) {
error = mappedread(zp, nbytes, uio);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes);
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
#if defined(__linux__)
/*
* if we actually read some bytes, bubbling EFAULT
* up to become EAGAIN isn't what we want here...
*
* ...on Linux, at least. On FBSD, doing this breaks.
*/
if (error == EFAULT &&
(zfs_uio_offset(uio) - start_offset) != 0)
error = 0;
#endif
break;
}
n -= nbytes;
}
int64_t nread = start_resid - n;
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
task_io_account_read(nread);
out:
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
ASSERT(clear_setid_bits_txgp != NULL);
ASSERT(tx != NULL);
/*
* Clear Set-UID/Set-GID bits on successful write if not
* privileged and at least one of the execute bits is set.
*
* It would be nice to do this after all writes have
* been done, but that would still expose the ISUID/ISGID
* to another app after the partial write is committed.
*
* Note: we don't call zfs_fuid_map_id() here because
* user 0 is not an ephemeral uid.
*/
mutex_enter(&zp->z_acl_lock);
if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
(zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
secpolicy_vnode_setid_retain(zp, cr,
((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
uint64_t newmode;
zp->z_mode &= ~(S_ISUID | S_ISGID);
newmode = zp->z_mode;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
(void *)&newmode, sizeof (uint64_t), tx);
mutex_exit(&zp->z_acl_lock);
/*
* Make sure SUID/SGID bits will be removed when we replay the
* log. If the setid bits are keep coming back, don't log more
* than one TX_SETATTR per transaction group.
*/
if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
vattr_t va = {0};
va.va_mask = ATTR_MODE;
va.va_nodeid = zp->z_id;
va.va_mode = newmode;
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
ATTR_MODE, NULL);
*clear_setid_bits_txgp = dmu_tx_get_txg(tx);
}
} else {
mutex_exit(&zp->z_acl_lock);
}
}
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - O_APPEND flag set if in append mode.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated if byte count > 0
*/
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
int error = 0, error1;
ssize_t start_resid = zfs_uio_resid(uio);
uint64_t clear_setid_bits_txg = 0;
/*
* Fasttrack empty write
*/
ssize_t n = start_resid;
if (n == 0)
return (0);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
sa_bulk_attr_t bulk[4];
int count = 0;
uint64_t mtime[2], ctime[2];
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
(zfs_uio_offset(uio) < zp->z_size))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* Validate file offset
*/
offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
if (woff < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Pre-fault the pages to ensure slow (e.g. NFS) pages
* don't hold up txg.
*/
ssize_t pfbytes = MIN(n, DMU_MAX_ACCESS >> 1);
if (zfs_uio_prefaultpages(pfbytes, uio)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFAULT));
}
/*
* If in append mode, set the io offset pointer to eof.
*/
zfs_locked_range_t *lr;
if (ioflag & O_APPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
woff = lr->lr_offset;
if (lr->lr_length == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
* Note that zp_size cannot change with this lock held.
*/
woff = zp->z_size;
}
zfs_uio_setoffset(uio, woff);
} else {
/*
* Note that if the file block size will change as a result of
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
}
if (zn_rlimit_fsize_uio(zp, uio)) {
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFBIG));
}
const rlim64_t limit = MAXOFFSET_T;
if (woff >= limit) {
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFBIG));
}
if (n > limit - woff)
n = limit - woff;
uint64_t end_size = MAX(zp->z_size, woff + n);
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
const uint64_t projid = zp->z_projid;
/*
* Write the file in reasonable size chunks. Each chunk is written
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
woff = zfs_uio_offset(uio);
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
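/*
 * lr_length == UINT64_MAX means the range lock covers the whole
 * file (taken when this write may change the block size). If the
 * file still fits in a single block, we may grow the block size
 * once the transaction has been assigned below.
 */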
uint64_t blksz;
if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
if (zp->z_blksz > zfsvfs->z_max_blksz &&
!ISP2(zp->z_blksz)) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
blksz = 1 << highbit64(zp->z_blksz);
} else {
blksz = zfsvfs->z_max_blksz;
}
blksz = MIN(blksz, P2ROUNDUP(end_size,
SPA_MINBLOCKSIZE));
blksz = MAX(blksz, zp->z_blksz);
} else {
blksz = zp->z_blksz;
}
arc_buf_t *abuf = NULL;
ssize_t nbytes = n;
if (n >= blksz && woff >= zp->z_size &&
P2PHASE(woff, blksz) == 0 &&
(blksz >= SPA_OLD_MAXBLOCKSIZE || n < 4 * blksz)) {
/*
* This write covers a full block. "Borrow" a buffer
* from the dmu so that we can fill it before we enter
* a transaction. This avoids the possibility of
* holding up the transaction if the data copy hangs
* up on a pagefault (e.g., from an NFS server mapping).
*/
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
blksz);
ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == blksz);
if ((error = zfs_uiocopy(abuf->b_data, blksz,
UIO_WRITE, uio, &nbytes))) {
dmu_return_arcbuf(abuf);
break;
}
ASSERT3S(nbytes, ==, blksz);
} else {
nbytes = MIN(n, (DMU_MAX_ACCESS >> 1) -
P2PHASE(woff, blksz));
if (pfbytes < nbytes) {
if (zfs_uio_prefaultpages(nbytes, uio)) {
error = SET_ERROR(EFAULT);
break;
}
pfbytes = nbytes;
}
}
/*
* Start a transaction.
*/
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
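/*
 * DB_DNODE_ENTER/EXIT keeps the dbuf's dnode pointer stable while
 * we register the write hold directly against the dnode.
 */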
dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff, nbytes);
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
break;
}
/*
* NB: We must call zfs_clear_setid_bits_if_necessary before
* committing the transaction!
*/
/*
* If rangelock_enter() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
* on the first iteration since rangelock_reduce() will
* shrink down lr_length to the appropriate size.
*/
if (lr->lr_length == UINT64_MAX) {
zfs_grow_blocksize(zp, blksz, tx);
zfs_rangelock_reduce(lr, woff, n);
}
ssize_t tx_bytes;
if (abuf == NULL) {
tx_bytes = zfs_uio_resid(uio);
zfs_uio_fault_disable(uio, B_TRUE);
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
if (error == EFAULT) {
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
/*
* Account for partial writes before
* continuing the loop.
* Update needs to occur before the next
* zfs_uio_prefaultpages, or prefaultpages may
* error, and we may break the loop early.
*/
n -= tx_bytes - zfs_uio_resid(uio);
pfbytes -= tx_bytes - zfs_uio_resid(uio);
continue;
}
#endif
/*
* On FreeBSD, EFAULT should be propagated back to the
* VFS, which will handle faulting and will retry.
*/
if (error != 0 && error != EFAULT) {
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
break;
}
tx_bytes -= zfs_uio_resid(uio);
} else {
/*
* We're writing a full block at a block-aligned
* offset and extending the file past EOF.
*
* dmu_assign_arcbuf_by_dbuf() will directly assign the
* arc buffer to a dbuf.
*/
error = dmu_assign_arcbuf_by_dbuf(
sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
if (error != 0) {
/*
* XXX This might not be necessary if
* dmu_assign_arcbuf_by_dbuf is guaranteed
* to be atomic.
*/
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_return_arcbuf(abuf);
dmu_tx_commit(tx);
break;
}
ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
zfs_uioskip(uio, nbytes);
tx_bytes = nbytes;
}
if (tx_bytes &&
zn_has_cached_data(zp, woff, woff + tx_bytes - 1) &&
!(ioflag & O_DIRECT)) {
update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
}
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
break;
}
zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
&clear_setid_bits_txg, tx);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
(void) atomic_cas_64(&zp->z_size, end_size,
zfs_uio_offset(uio));
ASSERT(error == 0 || error == EFAULT);
}
/*
* If we are replaying and eof is non-zero, then force
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
zp->z_size = zfsvfs->z_replay_eof;
error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
if (error1 != 0)
/* Avoid clobbering EFAULT. */
error = error1;
/*
* NB: During replay, the TX_SETATTR record logged by
* zfs_clear_setid_bits_if_necessary must precede any of
* the TX_WRITE records logged here.
*/
zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
NULL, NULL);
dmu_tx_commit(tx);
if (error != 0)
break;
ASSERT3S(tx_bytes, ==, nbytes);
n -= nbytes;
pfbytes -= nbytes;
}
zfs_znode_update_vfs(zp);
zfs_rangelock_exit(lr);
/*
* If we're in replay mode, or we made no progress, or the
* uio data is inaccessible, return an error. Otherwise, it's
* at least a partial write, so it's successful.
*/
if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
error == EFAULT) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (ioflag & (O_SYNC | O_DSYNC) ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
const int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
task_io_account_write(nwritten);
zfs_exit(zfsvfs, FTAG);
return (0);
}
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
zilog_t *zilog = zfsvfs->z_log;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif
static void zfs_get_done(zgd_t *zgd, int error);
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zfsvfs_t *zfsvfs = arg;
objset_t *os = zfsvfs->z_os;
znode_t *zp;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error = 0;
uint64_t zp_gen;
ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
/*
* Nothing to do if the file has been removed
*/
if (zfs_zget(zfsvfs, object, &zp) != 0)
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
/* check if generation number matches */
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (zp_gen)) != 0) {
zfs_zrele_async(zp);
return (SET_ERROR(EIO));
}
if (zp_gen != gen) {
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zp;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
}
ASSERT(error == 0 || error == ENOENT);
} else { /* indirect write */
+ ASSERT3P(zio, !=, NULL);
/*
* We have to lock the whole block so that no one can change
* the data while it's being written out and its checksum
* calculated. We need to re-check
* blocksize after we get the lock in case it's changed!
*/
for (;;) {
uint64_t blkoff;
size = zp->z_blksz;
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
if (zp->z_blksz == size)
break;
offset += blkoff;
zfs_rangelock_exit(zgd->zgd_lr);
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
if (zil_fault_io) {
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
if (error == 0)
- error = dmu_buf_hold(os, object, offset, zgd, &db,
- DMU_READ_NO_PREFETCH);
+ error = dmu_buf_hold_noread(os, object, offset, zgd,
+ &db);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zfs_get_done, zgd);
ASSERT(error || lr->lr_length <= size);
/*
* On success, we need to wait for the write I/O
* initiated by dmu_sync() to complete before we can
* release this dbuf. We will finish everything up
* in the zfs_get_done() callback.
*/
if (error == 0)
return (0);
if (error == EALREADY) {
lr->lr_common.lrc_txtype = TX_WRITE2;
/*
* TX_WRITE2 relies on the data previously
* written by the TX_WRITE that caused
* EALREADY. We zero out the BP because
* it is the old, currently-on-disk BP.
*/
zgd->zgd_bp = NULL;
BP_ZERO(bp);
error = 0;
}
}
}
zfs_get_done(zgd, error);
return (error);
}
static void
zfs_get_done(zgd_t *zgd, int error)
{
(void) error;
znode_t *zp = zgd->zgd_private;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
kmem_free(zgd, sizeof (zgd_t));
}
static int
zfs_enter_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
int error;
/* Swap so the zfs_enter()s always happen in a consistent (address) order. */
if (zfsvfs1 > zfsvfs2) {
zfsvfs_t *tmpzfsvfs;
tmpzfsvfs = zfsvfs2;
zfsvfs2 = zfsvfs1;
zfsvfs1 = tmpzfsvfs;
}
error = zfs_enter(zfsvfs1, tag);
if (error != 0)
return (error);
if (zfsvfs1 != zfsvfs2) {
error = zfs_enter(zfsvfs2, tag);
if (error != 0) {
zfs_exit(zfsvfs1, tag);
return (error);
}
}
return (0);
}
static void
zfs_exit_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
zfs_exit(zfsvfs1, tag);
if (zfsvfs1 != zfsvfs2)
zfs_exit(zfsvfs2, tag);
}
/*
* We split each clone request into chunks that can fit into a single ZIL
* log entry. Each ZIL log entry can fit 130816 bytes for a block cloning
* operation (see zil_max_log_data() and zfs_log_clone_range()). This gives
* us room for storing 1022 block pointers (130816 / 128 bytes per blkptr_t).
*
* On success, the function returns the number of bytes copied in *lenp.
* Note that it doesn't return how many bytes are left to be copied.
+ * On errors caused by file system or BRT limitations, `EINVAL` is
+ * returned. In most cases the user requested bad parameters: it may be
+ * possible to clone the file, but some parameters don't match the
+ * requirements.
*/
int
zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
uint64_t *outoffp, uint64_t *lenp, cred_t *cr)
{
zfsvfs_t *inzfsvfs, *outzfsvfs;
objset_t *inos, *outos;
zfs_locked_range_t *inlr, *outlr;
dmu_buf_impl_t *db;
dmu_tx_t *tx;
zilog_t *zilog;
uint64_t inoff, outoff, len, done;
uint64_t outsize, size;
int error;
int count = 0;
sa_bulk_attr_t bulk[3];
uint64_t mtime[2], ctime[2];
uint64_t uid, gid, projid;
blkptr_t *bps;
size_t maxblocks, nbps;
uint_t inblksz;
uint64_t clear_setid_bits_txg = 0;
inoff = *inoffp;
outoff = *outoffp;
len = *lenp;
done = 0;
inzfsvfs = ZTOZSB(inzp);
outzfsvfs = ZTOZSB(outzp);
/*
* We need to call zfs_enter() potentially on two different datasets,
* so we need a dedicated function for that.
*/
error = zfs_enter_two(inzfsvfs, outzfsvfs, FTAG);
if (error != 0)
return (error);
inos = inzfsvfs->z_os;
outos = outzfsvfs->z_os;
/*
* Both source and destination have to belong to the same storage pool.
*/
if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
+ /*
+ * outos and inos belong to the same storage pool (checked just
+ * above), so testing the feature on one of them is sufficient.
+ */
+ if (!spa_feature_is_enabled(dmu_objset_spa(outos),
+ SPA_FEATURE_BLOCK_CLONING)) {
+ zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
+ return (SET_ERROR(EOPNOTSUPP));
+ }
+
ASSERT(!outzfsvfs->z_replay);
error = zfs_verify_zp(inzp);
if (error == 0)
error = zfs_verify_zp(outzp);
if (error != 0) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (error);
}
- if (!spa_feature_is_enabled(dmu_objset_spa(outos),
- SPA_FEATURE_BLOCK_CLONING)) {
- zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
- return (SET_ERROR(EXDEV));
- }
-
/*
* We don't copy the source file's flags, which is why we don't allow
* cloning files that are in quarantine.
*/
if (inzp->z_pflags & ZFS_AV_QUARANTINED) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
if (inoff >= inzp->z_size) {
*lenp = 0;
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (0);
}
if (len > inzp->z_size - inoff) {
len = inzp->z_size - inoff;
}
if (len == 0) {
*lenp = 0;
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (0);
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(outzfsvfs)) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((outzp->z_pflags & ZFS_IMMUTABLE) != 0) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* No overlapping if we are cloning within the same file.
*/
if (inzp == outzp) {
if (inoff < outoff + len && outoff < inoff + len) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
}
/*
* Maintain predictable lock order.
*/
if (inzp < outzp || (inzp == outzp && inoff < outoff)) {
inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
RL_READER);
outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
RL_WRITER);
} else {
outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
RL_WRITER);
inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
RL_READER);
}
inblksz = inzp->z_blksz;
/*
* We cannot clone into a file with a different block size.
*/
if (inblksz != outzp->z_blksz && outzp->z_size > inblksz) {
- error = SET_ERROR(EXDEV);
+ error = SET_ERROR(EINVAL);
goto unlock;
}
/*
* Offsets and len must be at block boundaries.
*/
if ((inoff % inblksz) != 0 || (outoff % inblksz) != 0) {
- error = SET_ERROR(EXDEV);
+ error = SET_ERROR(EINVAL);
goto unlock;
}
/*
* Length must be a multiple of blksz, except at the end of the file.
*/
if ((len % inblksz) != 0 &&
(len < inzp->z_size - inoff || len < outzp->z_size - outoff)) {
- error = SET_ERROR(EXDEV);
+ error = SET_ERROR(EINVAL);
goto unlock;
}
error = zn_rlimit_fsize(outoff + len);
if (error != 0) {
goto unlock;
}
if (inoff >= MAXOFFSET_T || outoff >= MAXOFFSET_T) {
error = SET_ERROR(EFBIG);
goto unlock;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(outzfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(outzfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(outzfsvfs), NULL,
&outzp->z_size, 8);
zilog = outzfsvfs->z_log;
maxblocks = zil_max_log_data(zilog, sizeof (lr_clone_range_t)) /
sizeof (bps[0]);
uid = KUID_TO_SUID(ZTOUID(outzp));
gid = KGID_TO_SGID(ZTOGID(outzp));
projid = outzp->z_projid;
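+ /*
+ * The bps array can be on the order of 128KB (up to 1022 block
+ * pointers), which vmem_alloc() handles better than kmem_alloc().
+ */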
- bps = kmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);
+ bps = vmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);
/*
* Clone the file in reasonable size chunks. Each chunk is cloned
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (len > 0) {
size = MIN(inblksz * maxblocks, len);
if (zfs_id_overblockquota(outzfsvfs, DMU_USERUSED_OBJECT,
uid) ||
zfs_id_overblockquota(outzfsvfs, DMU_GROUPUSED_OBJECT,
gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(outzfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
nbps = maxblocks;
error = dmu_read_l0_bps(inos, inzp->z_id, inoff, size, bps,
&nbps);
if (error != 0) {
/*
- * If we are tyring to clone a block that was created
- * in the current transaction group. Return an error,
- * so the caller can fallback to just copying the data.
+ * If we are trying to clone a block that was created
+ * in the current transaction group, error will be
+ * EAGAIN here, which we can just return to the caller
+ * so it can fallback if it likes.
*/
- if (error == EAGAIN) {
- error = SET_ERROR(EXDEV);
- }
break;
}
/*
* Encrypted data is fine as long as it comes from the same
* dataset.
* TODO: We want to extend this in the future to allow cloning to
* datasets with the same keys (e.g. clones), or cloning a file from
* a snapshot of an encrypted dataset into the dataset itself.
*/
if (BP_IS_PROTECTED(&bps[0])) {
if (inzfsvfs != outzfsvfs) {
error = SET_ERROR(EXDEV);
break;
}
}
/*
* Start a transaction.
*/
tx = dmu_tx_create(outos);
dmu_tx_hold_sa(tx, outzp->z_sa_hdl, B_FALSE);
db = (dmu_buf_impl_t *)sa_get_db(outzp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), outoff, size);
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, outzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
break;
}
/*
* Copy source znode's block size. This only happens on the
* first iteration since zfs_rangelock_reduce() will shrink down
* lr_len to the appropriate size.
*/
if (outlr->lr_length == UINT64_MAX) {
zfs_grow_blocksize(outzp, inblksz, tx);
/*
* Round range lock up to the block boundary, so we
* prevent appends until we are done.
*/
zfs_rangelock_reduce(outlr, outoff,
((len - 1) / inblksz + 1) * inblksz);
}
error = dmu_brt_clone(outos, outzp->z_id, outoff, size, tx,
bps, nbps, B_FALSE);
if (error != 0) {
dmu_tx_commit(tx);
break;
}
zfs_clear_setid_bits_if_necessary(outzfsvfs, outzp, cr,
&clear_setid_bits_txg, tx);
zfs_tstamp_update_setup(outzp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((outsize = outzp->z_size) < outoff + size) {
(void) atomic_cas_64(&outzp->z_size, outsize,
outoff + size);
}
error = sa_bulk_update(outzp->z_sa_hdl, bulk, count, tx);
zfs_log_clone_range(zilog, tx, TX_CLONE_RANGE, outzp, outoff,
size, inblksz, bps, nbps);
dmu_tx_commit(tx);
if (error != 0)
break;
inoff += size;
outoff += size;
len -= size;
done += size;
}
- kmem_free(bps, sizeof (bps[0]) * maxblocks);
+ vmem_free(bps, sizeof (bps[0]) * maxblocks);
zfs_znode_update_vfs(outzp);
unlock:
zfs_rangelock_exit(outlr);
zfs_rangelock_exit(inlr);
if (done > 0) {
/*
* If we have made at least partial progress, reset the error.
*/
error = 0;
ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);
if (outos->os_sync == ZFS_SYNC_ALWAYS) {
zil_commit(zilog, outzp->z_id);
}
*inoffp += done;
*outoffp += done;
*lenp = done;
}
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (error);
}
/*
* Usual pattern would be to call zfs_clone_range() from zfs_replay_clone(),
* but we cannot do that, because when replaying we don't have source znode
* available. This is why we need a dedicated replay function.
*/
int
zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
const blkptr_t *bps, size_t nbps)
{
zfsvfs_t *zfsvfs;
dmu_buf_impl_t *db;
dmu_tx_t *tx;
int error;
int count = 0;
sa_bulk_attr_t bulk[3];
uint64_t mtime[2], ctime[2];
ASSERT3U(off, <, MAXOFFSET_T);
ASSERT3U(len, >, 0);
ASSERT3U(nbps, >, 0);
zfsvfs = ZTOZSB(zp);
ASSERT(spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
SPA_FEATURE_BLOCK_CLONING));
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
ASSERT(zfsvfs->z_replay);
ASSERT(!zfs_is_readonly(zfsvfs));
if ((off % blksz) != 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
/*
* Start a transaction.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), off, len);
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zp->z_blksz < blksz)
zfs_grow_blocksize(zp, blksz, tx);
dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps, B_TRUE);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
if (zp->z_size < off + len)
zp->z_size = off + len;
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
/*
* zil_replaying() not only checks whether we are replaying the ZIL, but also
* updates the ZIL header to record replay progress.
*/
VERIFY(zil_replaying(zfsvfs->z_log, tx));
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_clone_range);
EXPORT_SYMBOL(zfs_clone_range_replay);
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
"Bytes to read per chunk");
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index be5b9edf6ede..b30676b42d88 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1,4297 +1,4219 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay it after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
static uint_t zfs_commit_timeout_pct = 5;
/*
* Minimal time we care to delay commit waiting for more ZIL records.
* The FreeBSD kernel, at least, can't sleep for less than 2us at its best.
* So requests to sleep for less than 5us are a waste of CPU time, with
* a risk of significant log latency increase due to oversleeping.
*/
static uint64_t zil_min_commit_timeout = 5000;
/*
* See zil.h for more information about these fields.
*/
static zil_kstat_values_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 },
};
static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
* the disk(s) by the ZIL after an LWB write has completed. Setting this
* will cause ZIL corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
static int zil_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by single active ZIL writer.
*/
static uint64_t zil_slog_bulk = 768 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
-static void zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb);
static itx_t *zil_itx_clone(itx_t *oitx);
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
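/*
 * Record a block pointer we have visited; returns EEXIST if it was
 * already in the tree, so callers claim or free each block at most
 * once even if the chain references it multiple times.
 */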
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
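/*
 * Seed the checksum with random GUIDs and sequence number 1. Each
 * subsequent block must carry its predecessor's checksum with the
 * sequence word incremented, which is how zil_read_log_block()
 * detects the end of the chain.
 */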
zio_cksum_t *zc = &bp->blk_cksum;
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
zil_kstat_values_t *zs = ksp->ks_data;
ASSERT3P(&zil_stats, ==, zs);
if (rw == KSTAT_WRITE) {
return (SET_ERROR(EACCES));
}
zil_kstat_values_update(zs, &zil_sums_global);
return (0);
}
/*
* Read a log block and make sure it's valid.
*/
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
{
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
uint64_t size = BP_GET_LSIZE(bp);
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t *zilc = (*abuf)->b_data;
char *lr = (char *)(zilc + 1);
if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
- sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
+ sizeof (cksum)) ||
zilc->zc_nused < sizeof (*zilc) ||
zilc->zc_nused > size) {
error = SET_ERROR(ECKSUM);
} else {
*begin = lr;
*end = lr + zilc->zc_nused - sizeof (*zilc);
*nbp = zilc->zc_next_blk;
}
} else {
char *lr = (*abuf)->b_data;
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
- sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
+ sizeof (cksum)) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
*begin = lr;
*end = lr + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
}
return (error);
}
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
void
zil_sums_init(zil_sums_t *zs)
{
wmsum_init(&zs->zil_commit_count, 0);
wmsum_init(&zs->zil_commit_writer_count, 0);
wmsum_init(&zs->zil_itx_count, 0);
wmsum_init(&zs->zil_itx_indirect_count, 0);
wmsum_init(&zs->zil_itx_indirect_bytes, 0);
wmsum_init(&zs->zil_itx_copied_count, 0);
wmsum_init(&zs->zil_itx_copied_bytes, 0);
wmsum_init(&zs->zil_itx_needcopy_count, 0);
wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
}
void
zil_sums_fini(zil_sums_t *zs)
{
wmsum_fini(&zs->zil_commit_count);
wmsum_fini(&zs->zil_commit_writer_count);
wmsum_fini(&zs->zil_itx_count);
wmsum_fini(&zs->zil_itx_indirect_count);
wmsum_fini(&zs->zil_itx_indirect_bytes);
wmsum_fini(&zs->zil_itx_copied_count);
wmsum_fini(&zs->zil_itx_copied_bytes);
wmsum_fini(&zs->zil_itx_needcopy_count);
wmsum_fini(&zs->zil_itx_needcopy_bytes);
wmsum_fini(&zs->zil_itx_metaslab_normal_count);
wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
wmsum_fini(&zs->zil_itx_metaslab_normal_write);
wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
wmsum_fini(&zs->zil_itx_metaslab_slog_count);
wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
wmsum_fini(&zs->zil_itx_metaslab_slog_write);
wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
}
void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
zs->zil_commit_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_count);
zs->zil_commit_writer_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_writer_count);
zs->zil_itx_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_count);
zs->zil_itx_indirect_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_indirect_count);
zs->zil_itx_indirect_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_indirect_bytes);
zs->zil_itx_copied_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_copied_count);
zs->zil_itx_copied_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_copied_bytes);
zs->zil_itx_needcopy_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_needcopy_count);
zs->zil_itx_needcopy_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
zs->zil_itx_metaslab_normal_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
zs->zil_itx_metaslab_normal_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
zs->zil_itx_metaslab_normal_write.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
zs->zil_itx_metaslab_normal_alloc.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
zs->zil_itx_metaslab_slog_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
zs->zil_itx_metaslab_slog_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
zs->zil_itx_metaslab_slog_write.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
zs->zil_itx_metaslab_slog_alloc.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk = {{{{0}}}};
int error = 0;
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
char *lrp, *end;
arc_buf_t *abuf = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
&lrp, &end, &abuf);
if (error != 0) {
if (abuf)
arc_buf_destroy(abuf, &abuf);
if (claimed) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS read log block error %d, "
"dataset %s, seq 0x%llx\n", error, name,
(u_longlong_t)blk_seq);
}
break;
}
for (; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
if (lr->lrc_seq > claim_lr_seq) {
arc_buf_destroy(abuf, &abuf);
goto done;
}
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0) {
arc_buf_destroy(abuf, &abuf);
goto done;
}
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
arc_buf_destroy(abuf, &abuf);
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
zil_bp_tree_fini(zilog);
return (error);
}
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
(void) tx;
ASSERT(!BP_IS_HOLE(bp));
/*
* As we call this function from the context of a rewind to a
* checkpoint, each ZIL block whose txg is later than the txg
* that we rewind to is invalid. Thus, we return -1 so
* zil_parse() doesn't attempt to read it.
*/
if (bp->blk_birth >= first_txg)
return (-1);
if (zil_bp_tree_add(zilog, bp) != 0)
return (0);
zio_free(zilog->zl_spa, first_txg, bp);
return (0);
}
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
(void) zilog, (void) lrc, (void) tx, (void) first_txg;
return (0);
}
static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
ASSERT(lrc->lrc_txtype == TX_WRITE);
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
static int
zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
const blkptr_t *bp;
spa_t *spa;
uint_t ii;
ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);
if (tx == NULL) {
return (0);
}
/*
* XXX: Do we need to byteswap lr?
*/
spa = zilog->zl_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
/*
* When data is embedded in the BP there is no need to create a
* BRT entry, as there is no separate data block. Just copy the
* BP, as it contains the data.
*/
if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
brt_pending_add(spa, bp, tx);
}
}
return (0);
}
static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
switch (lrc->lrc_txtype) {
case TX_WRITE:
return (zil_claim_write(zilog, lrc, tx, first_txg));
case TX_CLONE_RANGE:
return (zil_claim_clone_range(zilog, lrc, tx));
default:
return (0);
}
}
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
(void) claim_txg;
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
ASSERT(lrc->lrc_txtype == TX_WRITE);
/*
* If we previously claimed it, we need to free it.
*/
if (bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp)) {
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
}
return (0);
}
static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
const blkptr_t *bp;
spa_t *spa;
uint_t ii;
ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);
if (tx == NULL) {
return (0);
}
spa = zilog->zl_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
if (!BP_IS_HOLE(bp)) {
zio_free(spa, dmu_tx_get_txg(tx), bp);
}
}
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t claim_txg)
{
if (claim_txg == 0) {
return (0);
}
switch (lrc->lrc_txtype) {
case TX_WRITE:
return (zil_free_write(zilog, lrc, tx, claim_txg));
case TX_CLONE_RANGE:
return (zil_free_clone_range(zilog, lrc, tx));
default:
return (0);
}
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (TREE_CMP(v1, v2));
}
+/*
+ * Allocate a new lwb. We may already have a block pointer for it, in which
+ * case we get size and version from there. Or we may not yet, in which case
+ * we choose them here and later make the block allocation match.
+ */
static lwb_t *
-zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
- boolean_t fastwrite)
+zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
+ uint64_t txg, lwb_state_t state)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
- lwb->lwb_blk = *bp;
- lwb->lwb_fastwrite = fastwrite;
+ if (bp) {
+ lwb->lwb_blk = *bp;
+ lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
+ sz = BP_GET_LSIZE(bp);
+ } else {
+ BP_ZERO(&lwb->lwb_blk);
+ lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
+ SPA_VERSION_SLIM_ZIL);
+ }
lwb->lwb_slog = slog;
- lwb->lwb_indirect = B_FALSE;
- if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
+ lwb->lwb_error = 0;
+ if (lwb->lwb_slim) {
+ lwb->lwb_nmax = sz;
lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
- lwb->lwb_sz = BP_GET_LSIZE(bp);
} else {
+ lwb->lwb_nmax = sz - sizeof (zil_chain_t);
lwb->lwb_nused = lwb->lwb_nfilled = 0;
- lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
}
- lwb->lwb_state = LWB_STATE_CLOSED;
- lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
+ lwb->lwb_sz = sz;
+ lwb->lwb_state = state;
+ lwb->lwb_buf = zio_buf_alloc(sz);
+ lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_issued_timestamp = 0;
lwb->lwb_issued_txg = 0;
- lwb->lwb_max_txg = txg;
+ lwb->lwb_alloc_txg = txg;
+ lwb->lwb_max_txg = 0;
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
+ if (state != LWB_STATE_NEW)
+ zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
return (lwb);
}
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
- ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
- VERIFY(list_is_empty(&lwb->lwb_waiters));
- VERIFY(list_is_empty(&lwb->lwb_itxs));
- ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
+ ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
+ lwb->lwb_state == LWB_STATE_FLUSH_DONE);
+ ASSERT3P(lwb->lwb_child_zio, ==, NULL);
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
+ ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
- ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
- lwb->lwb_state == LWB_STATE_FLUSH_DONE);
+ VERIFY(list_is_empty(&lwb->lwb_itxs));
+ VERIFY(list_is_empty(&lwb->lwb_waiters));
+ ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
+ ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
* Called when we create in-memory log transactions so that we know
* to clean up the itxs at the end of spa_sync().
*/
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* It's called in zil_commit context (zil_process_commit_list()/zil_create()).
* It activates the SPA_FEATURE_ZILSAXATTR feature, if it's enabled.
* We check dsl_dataset_feature_is_active() to avoid txg_wait_synced() on
* every zil_commit().
*/
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
!dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(ds, tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
- boolean_t fastwrite = FALSE;
boolean_t slog = FALSE;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
- fastwrite = TRUE;
-
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
- lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);
+ lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
/*
* If "zilsaxattr" feature is enabled on zpool, then activate
* it now when we're creating the ZIL chain. We can't wait with
* this until we write the first xattr log record because we
* need to wait for the feature activation to sync out.
*/
if (spa_feature_is_enabled(zilog->zl_spa,
SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
DMU_OST_ZVOL) {
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
}
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
} else {
/*
* This branch covers the case where we enable the feature on a
* zpool that has existing ZIL headers.
*/
zil_commit_activate_saxattr_feature(zilog);
}
IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));
ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
* Return B_TRUE if there were any entries to replay.
*/
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return (B_FALSE);
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
- if (lwb->lwb_fastwrite)
- metaslab_fastwrite_unmark(zilog->zl_spa,
- &lwb->lwb_blk);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
- zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
+ if (!BP_IS_HOLE(&lwb->lwb_blk))
+ zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
return (B_TRUE);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
zilog_t *zilog;
uint64_t first_txg;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
 * case it cannot have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
first_txg = spa_min_claim_txg(zilog->zl_spa);
/*
* If the spa_log_state is not set to be cleared, check whether
* the current uberblock is a checkpoint one and if the current
* header has been claimed before moving on.
*
* If the current uberblock is a checkpointed uberblock then
* one of the following scenarios took place:
*
* 1] We are currently rewinding to the checkpoint of the pool.
* 2] We crashed in the middle of a checkpoint rewind but we
* did manage to write the checkpointed uberblock to the
* vdev labels, so when we tried to import the pool again
* the checkpointed uberblock was selected from the import
* procedure.
*
* In both cases we want to zero out all the ZIL blocks, except
* the ones that have been claimed at the time of the checkpoint
* (their zh_claim_txg != 0). The reason is that these blocks
* may be corrupted since we may have reused their locations on
* disk after we took the checkpoint.
*
* We could try to set spa_log_state to SPA_LOG_CLEAR earlier
* when we first figure out whether the current uberblock is
* checkpointed or not. Unfortunately, that would discard all
* the logs, including the ones that are claimed, and we would
* leak space.
*/
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
(zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)) {
if (!BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_clear_log_block,
zil_noop_log_record, tx, first_txg, B_FALSE);
}
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* If we are not rewinding and opening the pool normally, then
* the min_claim_txg should be equal to the first txg of the pool.
*/
ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
(void) dp;
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the
* log as its content should have already been synced to the
* pool.
*/
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
/*
* Check whether the current uberblock is checkpointed (e.g.
* we are rewinding) and whether the current header has been
* claimed or not. If it hasn't then skip verifying it. We
* do this because its ZIL blocks may be part of the pool's
* state before the rewind, which is no longer valid.
*/
zil_header_t *zh = zil_header_in_syncing_context(zilog);
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL :
spa_min_claim_txg(os->os_spa), B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done, and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
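/*
 * Illustrative sketch (not part of this change): the waiting side of
 * this handshake, in the zil_commit() path, pairs with the broadcast
 * above roughly as:
 *
 *	mutex_enter(&zcw->zcw_lock);
 *	while (!zcw->zcw_done)
 *		cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
 *	mutex_exit(&zcw->zcw_lock);
 *
 * which is why zcw_done may only be set to B_TRUE with zcw_lock held.
 */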
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
- * zl_lock, thus it must be held when calling this function.
+ * zl_issuer_lock while the lwb is open, and by zl_lock otherwise.
+ * zl_issuer_lock also protects leaving the open state. Setting
+ * zcw_lwb is protected by zl_issuer_lock plus the lwb state not yet
+ * being FLUSH_DONE; that state transition is protected by zl_lock.
*/
- ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
+ ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_issuer_lock));
+ IMPLY(lwb->lwb_state != LWB_STATE_OPENED,
+ MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
+ ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
+ ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
- mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
- ASSERT3P(lwb, !=, NULL);
- ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
- lwb->lwb_state == LWB_STATE_ISSUED ||
- lwb->lwb_state == LWB_STATE_WRITE_DONE);
-
list_insert_tail(&lwb->lwb_waiters, zcw);
+ ASSERT3P(zcw->zcw_lwb, ==, NULL);
zcw->zcw_lwb = lwb;
- mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
- mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
list_insert_tail(nolwb, zcw);
- mutex_exit(&zcw->zcw_lock);
+ ASSERT3P(zcw->zcw_lwb, ==, NULL);
}
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
+ ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
+ ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
+
if (zil_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
avl_tree_t *src = &lwb->lwb_vdev_tree;
avl_tree_t *dst = &nlwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
 * Here, 'lwb' is at a point in its lifetime where lwb_vdev_tree does
* not need the protection of lwb_vdev_lock (it will only be modified
* while holding zilog->zl_lock) as its writes and those of its
* children have all completed. The younger 'nlwb' may be waiting on
* future writes to additional vdevs.
*/
mutex_enter(&nlwb->lwb_vdev_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
*/
while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
avl_index_t where;
if (avl_find(dst, zv, &where) == NULL) {
avl_insert(dst, zv, where);
} else {
kmem_free(zv, sizeof (*zv));
}
}
mutex_exit(&nlwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
 * This function is called after all vdevs associated with a given lwb
 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
 * as the lwb write completes, if "zil_nocacheflush" is set. Further,
 * all "previous" lwbs will have completed before this function is
 * called; i.e. this function is called for all previous lwbs before
 * it's called for "this" lwb (enforced via the zio dependencies
 * configured in zil_lwb_set_zio_dependency()).
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
zil_commit_waiter_t *zcw;
itx_t *itx;
- uint64_t txg;
- list_t itxs, waiters;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
- list_create(&itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
- list_create(&waiters, sizeof (zil_commit_waiter_t),
- offsetof(zil_commit_waiter_t, zcw_node));
-
hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp;
mutex_enter(&zilog->zl_lock);
zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8;
lwb->lwb_root_zio = NULL;
+ ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
+ lwb->lwb_state = LWB_STATE_FLUSH_DONE;
+
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
- list_move_tail(&itxs, &lwb->lwb_itxs);
- list_move_tail(&waiters, &lwb->lwb_waiters);
- txg = lwb->lwb_issued_txg;
-
- ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
- lwb->lwb_state = LWB_STATE_FLUSH_DONE;
-
- mutex_exit(&zilog->zl_lock);
-
- while ((itx = list_remove_head(&itxs)) != NULL)
+ while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
zil_itx_destroy(itx);
- list_destroy(&itxs);
- while ((zcw = list_remove_head(&waiters)) != NULL) {
+ while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
+ ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
* order for this error handling to work correctly. This
* includes ZIO errors from either this LWB's write or
* flush, as well as any errors from other dependent LWBs
* (e.g. a root LWB ZIO that might be a child of this LWB).
*
* With that said, it's important to note that LWB flush
* errors are not propagated up to the LWB root ZIO.
* This is incorrect behavior, and results in VDEV flush
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
- list_destroy(&waiters);
+
+ uint64_t txg = lwb->lwb_issued_txg;
+
+ /* Once we drop the lock, lwb may be freed by zil_sync(). */
+ mutex_exit(&zilog->zl_lock);
mutex_enter(&zilog->zl_lwb_io_lock);
ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
zilog->zl_lwb_inflight[txg & TXG_MASK]--;
if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
cv_broadcast(&zilog->zl_lwb_io_cv);
mutex_exit(&zilog->zl_lwb_io_lock);
}
/*
 * Wait for the completion of all writes/flushes issued for the given txg.
 * It guarantees that zil_lwb_flush_vdevs_done() has been called and has
 * returned for each of them.
*/
static void
zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
{
ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));
mutex_enter(&zilog->zl_lwb_io_lock);
while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
mutex_exit(&zilog->zl_lwb_io_lock);
#ifdef ZFS_DEBUG
mutex_enter(&zilog->zl_lock);
mutex_enter(&zilog->zl_lwb_io_lock);
lwb_t *lwb = list_head(&zilog->zl_lwb_list);
- while (lwb != NULL && lwb->lwb_max_txg <= txg) {
+ while (lwb != NULL) {
if (lwb->lwb_issued_txg <= txg) {
ASSERT(lwb->lwb_state != LWB_STATE_ISSUED);
ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE);
IMPLY(lwb->lwb_issued_txg > 0,
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
}
IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE,
lwb->lwb_buf == NULL);
lwb = list_next(&zilog->zl_lwb_list, lwb);
}
mutex_exit(&zilog->zl_lwb_io_lock);
mutex_exit(&zilog->zl_lock);
#endif
}
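/*
 * Illustrative sketch (an assumption about the caller, not part of
 * this change): this is meant to be used from syncing context, e.g.
 *
 *	zil_lwb_flush_wait_all(zilog, spa_syncing_txg(zilog->zl_spa));
 *
 * so that zil_sync() does not free log blocks that an in-flight lwb
 * zio of this txg may still reference.
 */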
/*
* This is called when an lwb's write zio completes. The callback's
* purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
* in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
* in writing out this specific lwb's data, and in the case that cache
* flushes have been deferred, vdevs involved in writing the data for
* previous lwbs. The writes corresponding to all the vdevs in the
* lwb_vdev_tree will have completed by the time this is called, due to
* the zio dependencies configured in zil_lwb_set_zio_dependency(),
* which takes deferred flushes into account. The lwb will be "done"
* once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
* completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
lwb_t *nlwb;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
- ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
- ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
- ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
- ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
- ASSERT(!BP_IS_GANG(zio->io_bp));
- ASSERT(!BP_IS_HOLE(zio->io_bp));
- ASSERT(BP_GET_FILL(zio->io_bp) == 0);
-
abd_free(zio->io_abd);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
lwb->lwb_buf = NULL;
mutex_enter(&zilog->zl_lock);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
lwb->lwb_state = LWB_STATE_WRITE_DONE;
+ lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
- lwb->lwb_fastwrite = FALSE;
nlwb = list_next(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so, after the lwb block failed to be
* written out.
*
* Additionally, we don't perform any further error handling at
* this point (e.g. setting "zcw_zio_error" appropriately), as
* we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
* we expect any error seen here, to have been propagated to
* that function).
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
/*
* If this lwb does not have any threads waiting for it to
* complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
* command to the vdevs written to by "this" lwb, and instead
* rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
* command for those vdevs. Thus, we merge the vdev tree of
* "this" lwb with the vdev tree of the "next" lwb in the list,
* and assume the "next" lwb will handle flushing the vdevs (or
 * deferring the flush(es) again).
*
* This is a useful performance optimization, especially for
* workloads with lots of async write activity and few sync
* write and/or fsync activity, as it has the potential to
* coalesce multiple flush commands to a vdev into one.
*/
if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) {
zil_lwb_flush_defer(lwb, nlwb);
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
if (vd != NULL && !vd->vdev_nowritecache) {
/*
* The "ZIO_FLAG_DONT_PROPAGATE" is currently
* always used within "zio_flush". This means,
* any errors when flushing the vdev(s), will
* (unfortunately) not be handled correctly,
* since these "zio_flush" errors will not be
* propagated up to "zil_lwb_flush_vdevs_done".
*/
zio_flush(lwb->lwb_root_zio, vd);
}
kmem_free(zv, sizeof (*zv));
}
}
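/*
 * Illustrative example (not part of this change): if lwb1 has no
 * waiters and both lwb1 and lwb2 wrote only to vdev V, lwb1's
 * completion defers its flush into lwb2's lwb_vdev_tree, and a single
 * DKIOCFLUSHWRITECACHE for V is issued when lwb2's write completes,
 * covering both writes.
 */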
+/*
+ * Build the zio dependency chain, which is used to preserve the ordering of
+ * lwb completions that is required by the semantics of the ZIL. Each new lwb
+ * zio becomes a parent of the previous lwb zio, such that the new lwb's zio
+ * cannot complete until the previous lwb's zio completes.
+ *
+ * This is required by the semantics of zil_commit(): the commit waiters
+ * attached to the lwbs will be woken in the lwb zio's completion callback,
+ * so this zio dependency graph ensures the waiters are woken in the correct
+ * order (the same order the lwbs were created).
+ */
static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
- lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
-
- ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zilog->zl_lock));
+ lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb);
+ if (prev_lwb == NULL ||
+ prev_lwb->lwb_state == LWB_STATE_FLUSH_DONE)
+ return;
+
/*
- * The zilog's "zl_last_lwb_opened" field is used to build the
- * lwb/zio dependency chain, which is used to preserve the
- * ordering of lwb completions that is required by the semantics
- * of the ZIL. Each new lwb zio becomes a parent of the
- * "previous" lwb zio, such that the new lwb's zio cannot
- * complete until the "previous" lwb's zio completes.
+ * If the previous lwb's write hasn't already completed, we also want
+ * to order the completion of the lwb write zios (below, we only order
+ * the completion of the lwb root zios). This is required because of
+ * how we can defer the DKIOCFLUSHWRITECACHE commands for each lwb.
+ *
+ * When the DKIOCFLUSHWRITECACHE commands are deferred, the previous
+ * lwb will rely on this lwb to flush the vdevs written to by that
+ * previous lwb. Thus, we need to ensure this lwb doesn't issue the
+ * flush until after the previous lwb's write completes. We ensure
+ * this ordering by setting the zio parent/child relationship here.
*
- * This is required by the semantics of zil_commit(); the commit
- * waiters attached to the lwbs will be woken in the lwb zio's
- * completion callback, so this zio dependency graph ensures the
- * waiters are woken in the correct order (the same order the
- * lwbs were created).
+ * Without this relationship on the lwb's write zio, it's possible
+ * for this lwb's write to complete prior to the previous lwb's write
+ * completing; and thus, the vdevs for the previous lwb would be
+ * flushed prior to that lwb's data being written to those vdevs (the
+ * vdevs are flushed in the lwb write zio's completion handler,
+ * zil_lwb_write_done()).
*/
- if (last_lwb_opened != NULL &&
- last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
- ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
- last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
- last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);
-
- ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
- zio_add_child(lwb->lwb_root_zio,
- last_lwb_opened->lwb_root_zio);
-
- /*
- * If the previous lwb's write hasn't already completed,
- * we also want to order the completion of the lwb write
- * zios (above, we only order the completion of the lwb
- * root zios). This is required because of how we can
- * defer the DKIOCFLUSHWRITECACHE commands for each lwb.
- *
- * When the DKIOCFLUSHWRITECACHE commands are deferred,
- * the previous lwb will rely on this lwb to flush the
- * vdevs written to by that previous lwb. Thus, we need
- * to ensure this lwb doesn't issue the flush until
- * after the previous lwb's write completes. We ensure
- * this ordering by setting the zio parent/child
- * relationship here.
- *
- * Without this relationship on the lwb's write zio,
- * it's possible for this lwb's write to complete prior
- * to the previous lwb's write completing; and thus, the
- * vdevs for the previous lwb would be flushed prior to
- * that lwb's data being written to those vdevs (the
- * vdevs are flushed in the lwb write zio's completion
- * handler, zil_lwb_write_done()).
- */
- if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
- ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
- last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
-
- ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
- zio_add_child(lwb->lwb_write_zio,
- last_lwb_opened->lwb_write_zio);
- }
+ if (prev_lwb->lwb_state == LWB_STATE_ISSUED) {
+ ASSERT3P(prev_lwb->lwb_write_zio, !=, NULL);
+ zio_add_child(lwb->lwb_write_zio, prev_lwb->lwb_write_zio);
+ } else {
+ ASSERT3S(prev_lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
}
+
+ ASSERT3P(prev_lwb->lwb_root_zio, !=, NULL);
+ zio_add_child(lwb->lwb_root_zio, prev_lwb->lwb_root_zio);
}
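/*
 * Illustrative sketch (not part of this change): for lwbs A, B and C
 * created in that order, the zio_add_child() calls above make each
 * lwb's root zio a parent of its predecessor's root zio:
 *
 *	C.lwb_root_zio -> B.lwb_root_zio -> A.lwb_root_zio	(parent -> child)
 *
 * so zil_lwb_flush_vdevs_done() fires for A, then B, then C, and the
 * commit waiters are woken in lwb creation order.
 */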
/*
* This function's purpose is to "open" an lwb such that it is ready to
- * accept new itxs being committed to it. To do this, the lwb's zio
- * structures are created, and linked to the lwb. This function is
- * idempotent; if the passed in lwb has already been opened, this
- * function is essentially a no-op.
+ * accept new itxs being committed to it. This function is idempotent; if
+ * the passed in lwb has already been opened, it is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
- zbookmark_phys_t zb;
- zio_priority_t prio;
-
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
- ASSERT3P(lwb, !=, NULL);
- EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
- EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
- if (lwb->lwb_root_zio != NULL)
+ if (lwb->lwb_state != LWB_STATE_NEW) {
+ ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
return;
-
- lwb->lwb_root_zio = zio_root(zilog->zl_spa,
- zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
-
- abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
- BP_GET_LSIZE(&lwb->lwb_blk));
-
- if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
- prio = ZIO_PRIORITY_SYNC_WRITE;
- else
- prio = ZIO_PRIORITY_ASYNC_WRITE;
-
- SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
- ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
- lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
-
- /* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
- mutex_enter(&zilog->zl_lock);
- if (!lwb->lwb_fastwrite) {
- metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
- lwb->lwb_fastwrite = 1;
}
- lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, zilog->zl_spa, 0,
- &lwb->lwb_blk, lwb_abd, BP_GET_LSIZE(&lwb->lwb_blk),
- zil_lwb_write_done, lwb, prio,
- ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb);
-
+ mutex_enter(&zilog->zl_lock);
lwb->lwb_state = LWB_STATE_OPENED;
-
- zil_lwb_set_zio_dependency(zilog, lwb);
zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
static const struct {
uint64_t limit;
uint64_t blksz;
} zil_block_buckets[] = {
{ 4096, 4096 }, /* non TX_WRITE */
{ 8192 + 4096, 8192 + 4096 }, /* database */
{ 32768 + 4096, 32768 + 4096 }, /* NFS writes */
{ 65536 + 4096, 65536 + 4096 }, /* 64KB writes */
{ 131072, 131072 }, /* < 128KB writes */
{ 131072 +4096, 65536 + 4096 }, /* 128KB writes */
{ UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */
};
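/*
 * Worked example (illustrative): for zl_cur_used + sizeof (zil_chain_t)
 * of ~10KB, the selection loop below skips the 4096 bucket and stops at
 * the 8192 + 4096 entry, so a 12KB block is used; anything above
 * 131072 + 4096 falls through to the last entry and allocates
 * SPA_OLD_MAXBLOCKSIZE.
 */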
/*
* Maximum block size used by the ZIL. This is picked up when the ZIL is
* initialized. Otherwise this should not be used directly; see
* zl_max_block_size instead.
*/
static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
/*
 * Close the current log block so that it can be issued, and allocate
 * the next one. Has to be called under zl_issuer_lock to chain more
 * lwbs.
*/
static lwb_t *
-zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, list_t *ilwbs)
+zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
{
- lwb_t *nlwb = NULL;
- zil_chain_t *zilc;
- spa_t *spa = zilog->zl_spa;
- blkptr_t *bp;
- dmu_tx_t *tx;
- uint64_t txg;
- uint64_t zil_blksz;
- int i, error;
- boolean_t slog;
+ int i;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
- ASSERT3P(lwb->lwb_root_zio, !=, NULL);
- ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
+ lwb->lwb_state = LWB_STATE_CLOSED;
/*
- * If this lwb includes indirect writes, we have to commit before
- * creating the transaction, otherwise we may end up in dead lock.
- */
- if (lwb->lwb_indirect) {
- for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
- itx = list_next(&lwb->lwb_itxs, itx))
- zil_lwb_commit(zilog, lwb, itx);
- lwb->lwb_nused = lwb->lwb_nfilled;
- }
-
- /*
- * Allocate the next block and save its address in this block
- * before writing it in order to establish the log chain.
- */
-
- tx = dmu_tx_create(zilog->zl_os);
-
- /*
- * Since we are not going to create any new dirty data, and we
- * can even help with clearing the existing dirty data, we
- * should not be subject to the dirty data based delays. We
- * use TXG_NOTHROTTLE to bypass the delay mechanism.
+ * If there was an allocation failure, the NULL returned here will
+ * trigger zil_commit_writer_stall() at the caller. This is inherently
+ * racy, since the allocation may not have happened yet.
*/
- VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
-
- dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
- txg = dmu_tx_get_txg(tx);
-
- mutex_enter(&zilog->zl_lwb_io_lock);
- lwb->lwb_issued_txg = txg;
- zilog->zl_lwb_inflight[txg & TXG_MASK]++;
- zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
- mutex_exit(&zilog->zl_lwb_io_lock);
+ if (lwb->lwb_error != 0)
+ return (NULL);
/*
* Log blocks are pre-allocated. Here we select the size of the next
* block, based on size used in the last block.
* - first find the smallest bucket that will fit the block from a
* limited set of block sizes. This is because it's faster to write
* blocks allocated from the same metaslab as they are adjacent or
* close.
* - next find the maximum from the new suggested size and an array of
* previous sizes. This lessens a picket fence effect of wrongly
* guessing the size if we have a stream of say 2k, 64k, 2k, 64k
* requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
- zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
+ uint64_t zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++)
continue;
zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size);
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
DTRACE_PROBE3(zil__block__size, zilog_t *, zilog,
uint64_t, zil_blksz,
uint64_t, zilog->zl_prev_blks[zilog->zl_prev_rotor]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
- if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2)
- zilc = (zil_chain_t *)lwb->lwb_buf;
- else
- zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
- bp = &zilc->zc_next_blk;
- BP_ZERO(bp);
- error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
- if (error == 0) {
- ASSERT3U(bp->blk_birth, ==, txg);
- bp->blk_cksum = lwb->lwb_blk.blk_cksum;
- bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
+ return (zil_alloc_lwb(zilog, zil_blksz, NULL, 0, 0, state));
+}
- /*
- * Allocate a new log write block (lwb).
- */
- nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
- }
+/*
+ * Finalize the previously closed block and issue the write zio.
+ */
+static void
+zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
+{
+ spa_t *spa = zilog->zl_spa;
+ zil_chain_t *zilc;
+ boolean_t slog;
+ zbookmark_phys_t zb;
+ zio_priority_t prio;
+ int error;
- lwb->lwb_state = LWB_STATE_ISSUED;
+ ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
- dmu_tx_commit(tx);
+ /* Actually fill the lwb with the data. */
+ for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
+ itx = list_next(&lwb->lwb_itxs, itx))
+ zil_lwb_commit(zilog, lwb, itx);
+ lwb->lwb_nused = lwb->lwb_nfilled;
+
+ lwb->lwb_root_zio = zio_root(spa, zil_lwb_flush_vdevs_done, lwb,
+ ZIO_FLAG_CANFAIL);
/*
- * We need to acquire the config lock for the lwb to issue it later.
- * However, if we already have a queue of closed parent lwbs already
- * holding the config lock (but not yet issued), we can't block here
- * waiting on the lock or we will deadlock. In that case we must
- * first issue to parent IOs before waiting on the lock.
+ * The lwb is now ready to be issued, but that can only happen if it
+ * already has its block pointer allocated or the allocation has
+ * failed. Otherwise leave it as-is, relying on some other thread to
+ * issue it after allocating its block pointer by calling
+ * zil_lwb_write_issue() for the previous lwb(s) in the chain.
*/
- if (ilwbs && !list_is_empty(ilwbs)) {
- if (!spa_config_tryenter(spa, SCL_STATE, lwb, RW_READER)) {
- lwb_t *tlwb;
- while ((tlwb = list_remove_head(ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, tlwb);
- spa_config_enter(spa, SCL_STATE, lwb, RW_READER);
+ mutex_enter(&zilog->zl_lock);
+ lwb->lwb_state = LWB_STATE_READY;
+ if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) {
+ mutex_exit(&zilog->zl_lock);
+ return;
+ }
+ mutex_exit(&zilog->zl_lock);
+
+next_lwb:
+ if (lwb->lwb_slim)
+ zilc = (zil_chain_t *)lwb->lwb_buf;
+ else
+ zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
+ int wsz = lwb->lwb_sz;
+ if (lwb->lwb_error == 0) {
+ abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
+ if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
+ prio = ZIO_PRIORITY_SYNC_WRITE;
+ else
+ prio = ZIO_PRIORITY_ASYNC_WRITE;
+ SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
+ ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
+ lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
+ lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
+ &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
+ lwb, prio, ZIO_FLAG_CANFAIL, &zb);
+ zil_lwb_add_block(lwb, &lwb->lwb_blk);
+
+ if (lwb->lwb_slim) {
+ /* For Slim ZIL only write what is used. */
+ wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
+ int);
+ ASSERT3S(wsz, <=, lwb->lwb_sz);
+ zio_shrink(lwb->lwb_write_zio, wsz);
+ wsz = lwb->lwb_write_zio->io_size;
}
+ memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
+ zilc->zc_pad = 0;
+ zilc->zc_nused = lwb->lwb_nused;
+ zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
} else {
- spa_config_enter(spa, SCL_STATE, lwb, RW_READER);
+ /*
+ * We can't write the lwb if there was an allocation failure,
+ * so create a null zio instead just to maintain dependencies.
+ */
+ lwb->lwb_write_zio = zio_null(lwb->lwb_root_zio, spa, NULL,
+ zil_lwb_write_done, lwb, ZIO_FLAG_CANFAIL);
+ lwb->lwb_write_zio->io_error = lwb->lwb_error;
}
-
- if (ilwbs)
- list_insert_tail(ilwbs, lwb);
+ if (lwb->lwb_child_zio)
+ zio_add_child(lwb->lwb_write_zio, lwb->lwb_child_zio);
/*
- * If there was an allocation failure then nlwb will be null which
- * forces a txg_wait_synced().
+ * Open a transaction to allocate the next block pointer.
*/
- return (nlwb);
-}
+ dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
+ VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
+ dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
+ uint64_t txg = dmu_tx_get_txg(tx);
-/*
- * Finalize previously closed block and issue the write zio.
- * Does not require locking.
- */
-static void
-zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
-{
- zil_chain_t *zilc;
- int wsz;
-
- /* Actually fill the lwb with the data if not yet. */
- if (!lwb->lwb_indirect) {
- for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
- itx = list_next(&lwb->lwb_itxs, itx))
- zil_lwb_commit(zilog, lwb, itx);
- lwb->lwb_nused = lwb->lwb_nfilled;
+ /*
+ * Allocate the next block pointer unless we are already in error.
+ */
+ lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb);
+ blkptr_t *bp = &zilc->zc_next_blk;
+ BP_ZERO(bp);
+ error = lwb->lwb_error;
+ if (error == 0) {
+ error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
+ &slog);
+ }
+ if (error == 0) {
+ ASSERT3U(bp->blk_birth, ==, txg);
+ BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
+ ZIO_CHECKSUM_ZILOG);
+ bp->blk_cksum = lwb->lwb_blk.blk_cksum;
+ bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
}
- if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
- /* For Slim ZIL only write what is used. */
- wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, int);
- ASSERT3S(wsz, <=, lwb->lwb_sz);
- zio_shrink(lwb->lwb_write_zio, wsz);
- wsz = lwb->lwb_write_zio->io_size;
+ /*
+ * Reduce TXG open time by incrementing the inflight counter and
+ * committing the transaction. zil_sync() will wait for it to return
+ * to zero.
+ */
+ mutex_enter(&zilog->zl_lwb_io_lock);
+ lwb->lwb_issued_txg = txg;
+ zilog->zl_lwb_inflight[txg & TXG_MASK]++;
+ zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
+ mutex_exit(&zilog->zl_lwb_io_lock);
+ dmu_tx_commit(tx);
- zilc = (zil_chain_t *)lwb->lwb_buf;
- } else {
- wsz = lwb->lwb_sz;
- zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
- }
- zilc->zc_pad = 0;
- zilc->zc_nused = lwb->lwb_nused;
- zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
+ spa_config_enter(spa, SCL_STATE, lwb, RW_READER);
/*
- * clear unused data for security
+ * We've completed all potentially blocking operations. Update the
+ * nlwb and allow it to proceed without possible lock order reversals.
*/
- memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
+ mutex_enter(&zilog->zl_lock);
+ zil_lwb_set_zio_dependency(zilog, lwb);
+ lwb->lwb_state = LWB_STATE_ISSUED;
+
+ if (nlwb) {
+ nlwb->lwb_blk = *bp;
+ nlwb->lwb_error = error;
+ nlwb->lwb_slog = slog;
+ nlwb->lwb_alloc_txg = txg;
+ if (nlwb->lwb_state != LWB_STATE_READY)
+ nlwb = NULL;
+ }
+ mutex_exit(&zilog->zl_lock);
if (lwb->lwb_slog) {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
lwb->lwb_nused);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write,
wsz);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc,
BP_GET_LSIZE(&lwb->lwb_blk));
} else {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
lwb->lwb_nused);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write,
wsz);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc,
BP_GET_LSIZE(&lwb->lwb_blk));
}
- ASSERT(spa_config_held(zilog->zl_spa, SCL_STATE, RW_READER));
- zil_lwb_add_block(lwb, &lwb->lwb_blk);
lwb->lwb_issued_timestamp = gethrtime();
- zio_nowait(lwb->lwb_root_zio);
+ if (lwb->lwb_child_zio)
+ zio_nowait(lwb->lwb_child_zio);
zio_nowait(lwb->lwb_write_zio);
+ zio_nowait(lwb->lwb_root_zio);
+
+ /*
+ * If nlwb was ready when we gave it the block pointer,
+ * it is on us to issue it, and possibly the following ones.
+ */
+ lwb = nlwb;
+ if (lwb)
+ goto next_lwb;
}
/*
* Maximum amount of data that can be put into single log block.
*/
uint64_t
zil_max_log_data(zilog_t *zilog, size_t hdrsize)
{
return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize);
}
/*
* Maximum amount of log space we agree to waste to reduce number of
* WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
*/
static inline uint64_t
zil_max_waste_space(zilog_t *zilog)
{
return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 8);
}
/*
* Maximum amount of write data for WR_COPIED. For correctness, consumers
* must fall back to WR_NEED_COPY if we can't fit the entire record into one
* maximum sized log block, because each WR_COPIED record must fit in a
* single log block. For space efficiency, we want to fit two records into a
* max-sized log block.
*/
uint64_t
zil_max_copied_data(zilog_t *zilog)
{
return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
sizeof (lr_write_t));
}
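/*
 * Worked example (illustrative): with the default 128KB maximum block
 * size, zil_max_log_data(zilog, sizeof (lr_write_t)) is
 * 131072 - sizeof (zil_chain_t) - sizeof (lr_write_t), the waste bound
 * above is one eighth of that, and zil_max_copied_data() is
 * (131072 - sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t), i.e. two
 * maximal WR_COPIED records (header plus data) fit into one max-sized
 * log block.
 */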
/*
* Estimate space needed in the lwb for the itx. Allocate more lwbs or
* split the itx as needed, but don't touch the actual transaction data.
* Has to be called under zl_issuer_lock to call zil_lwb_write_close()
* to chain more lwbs.
*/
static lwb_t *
zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
{
itx_t *citx;
lr_t *lr, *clr;
lr_write_t *lrw;
uint64_t dlen, dnow, lwb_sp, reclen, max_log_data;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
lr = &itx->itx_lr;
lrw = (lr_write_t *)lr;
/*
* A commit itx doesn't represent any on-disk state; instead
* it's simply used as a place holder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
if (lr->lrc_txtype == TX_COMMIT) {
- mutex_enter(&zilog->zl_lock);
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
- itx->itx_private = NULL;
- mutex_exit(&zilog->zl_lock);
list_insert_tail(&lwb->lwb_itxs, itx);
return (lwb);
}
if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
} else {
dlen = 0;
}
reclen = lr->lrc_reclen;
zilog->zl_cur_used += (reclen + dlen);
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
- lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
+ lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t));
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < zil_max_waste_space(zilog) &&
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
- lwb = zil_lwb_write_close(zilog, lwb, ilwbs);
+ list_insert_tail(ilwbs, lwb);
+ lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
if (lwb == NULL)
return (NULL);
- zil_lwb_write_open(zilog, lwb);
- lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
+ lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
/*
* There must be enough space in the new, empty log block to
* hold reclen. For WR_COPIED, we need to fit the whole
* record in one block, and reclen is the header size + the
* data size. For WR_NEED_COPY, we can create multiple
* records, splitting the data into multiple blocks, so we
* only need to fit one word of data per block; in this case
* reclen is just the header size (no data).
*/
ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
dnow = MIN(dlen, lwb_sp - reclen);
if (dlen > dnow) {
ASSERT3U(lr->lrc_txtype, ==, TX_WRITE);
ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY);
citx = zil_itx_clone(itx);
clr = &citx->itx_lr;
lr_write_t *clrw = (lr_write_t *)clr;
clrw->lr_length = dnow;
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
} else {
citx = itx;
clr = lr;
}
/*
* We're actually making an entry, so update lrc_seq to be the
* log record sequence number. Note that this is generally not
* equal to the itx sequence number because not all transactions
* are synchronous, and sometimes spa_sync() gets there first.
*/
clr->lrc_seq = ++zilog->zl_lr_seq;
lwb->lwb_nused += reclen + dnow;
- ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
+ ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
zil_lwb_add_txg(lwb, lr->lrc_txg);
list_insert_tail(&lwb->lwb_itxs, citx);
dlen -= dnow;
if (dlen > 0) {
zilog->zl_cur_used += reclen;
goto cont;
}
- /*
- * We have to really issue all queued LWBs before we may have to
- * wait for a txg sync. Otherwise we may end up in a dead lock.
- */
- if (lr->lrc_txtype == TX_WRITE) {
- boolean_t frozen = lr->lrc_txg > spa_freeze_txg(zilog->zl_spa);
- if (frozen || itx->itx_wr_state == WR_INDIRECT) {
- lwb_t *tlwb;
- while ((tlwb = list_remove_head(ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, tlwb);
- }
- if (itx->itx_wr_state == WR_INDIRECT)
- lwb->lwb_indirect = B_TRUE;
- if (frozen)
- txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg);
- }
+ if (lr->lrc_txtype == TX_WRITE &&
+ lr->lrc_txg > spa_freeze_txg(zilog->zl_spa))
+ txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg);
return (lwb);
}
/*
* Fill the actual transaction data into the lwb, following zil_lwb_assign().
* Does not require locking.
*/
static void
zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
{
lr_t *lr, *lrb;
lr_write_t *lrw, *lrwb;
char *lr_buf;
uint64_t dlen, reclen;
lr = &itx->itx_lr;
lrw = (lr_write_t *)lr;
if (lr->lrc_txtype == TX_COMMIT)
return;
if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
} else {
dlen = 0;
}
reclen = lr->lrc_reclen;
ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled);
lr_buf = lwb->lwb_buf + lwb->lwb_nfilled;
memcpy(lr_buf, lr, reclen);
lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */
lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zilog, zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lr->lrc_txtype == TX_WRITE) {
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zilog, zil_itx_copied_count);
ZIL_STAT_INCR(zilog, zil_itx_copied_bytes,
lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrb->lrc_reclen += dlen;
ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count);
ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes,
dlen);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zilog, zil_itx_indirect_count);
ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes,
lrw->lr_length);
+ if (lwb->lwb_child_zio == NULL) {
+ lwb->lwb_child_zio = zio_root(
+ zilog->zl_spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL);
+ }
}
/*
- * We pass in the "lwb_write_zio" rather than
- * "lwb_root_zio" so that the "lwb_write_zio"
- * becomes the parent of any zio's created by
- * the "zl_get_data" callback. The vdevs are
- * flushed after the "lwb_write_zio" completes,
- * so we want to make sure that completion
- * callback waits for these additional zio's,
- * such that the vdevs used by those zio's will
- * be included in the lwb's vdev tree, and those
- * vdevs will be properly flushed. If we passed
- * in "lwb_root_zio" here, then these additional
- * vdevs may not be flushed; e.g. if these zio's
- * completed after "lwb_write_zio" completed.
+ * The "lwb_child_zio" we pass in will become a child of
+ * "lwb_write_zio", when one is created, so one will be
+ * a parent of any zio's created by the "zl_get_data".
+ * This way "lwb_write_zio" will first wait for children
+ * block pointers before own writing, and then for their
+ * writing completion before the vdev cache flushing.
*/
error = zilog->zl_get_data(itx->itx_private,
itx->itx_gen, lrwb, dbuf, lwb,
- lwb->lwb_write_zio);
+ lwb->lwb_child_zio);
if (dbuf != NULL && error == 0) {
/* Zero any padding bytes in the last block. */
memset((char *)dbuf + lrwb->lr_length, 0,
dlen - lrwb->lr_length);
}
/*
* Typically, the only return values we should see from
* ->zl_get_data() are 0, EIO, ENOENT, EEXIST or
* EALREADY. However, it is also possible to see other
* error values such as ENOSPC or EINVAL from
* dmu_read() -> dnode_hold() -> dnode_hold_impl() or
* ENXIO as well as a multitude of others from the
* block layer through dmu_buf_hold() -> dbuf_read()
* -> zio_wait(), as well as through dmu_read() ->
* dnode_hold() -> dnode_hold_impl() -> dbuf_read() ->
* zio_wait(). When these errors happen, we can assume
* that neither an immediate write nor an indirect
* write occurred, so we need to fall back to
* txg_wait_synced(). This is unusual, so we print to
* dmesg whenever one of these errors occurs.
*/
switch (error) {
case 0:
break;
default:
cmn_err(CE_WARN, "zil_lwb_commit() received "
"unexpected error %d from ->zl_get_data()"
". Falling back to txg_wait_synced().",
error);
zfs_fallthrough;
case EIO:
- if (lwb->lwb_indirect) {
- txg_wait_synced(zilog->zl_dmu_pool,
- lr->lrc_txg);
- } else {
- lwb->lwb_write_zio->io_error = error;
- }
+ txg_wait_synced(zilog->zl_dmu_pool,
+ lr->lrc_txg);
zfs_fallthrough;
case ENOENT:
zfs_fallthrough;
case EEXIST:
zfs_fallthrough;
case EALREADY:
return;
}
}
}
lwb->lwb_nfilled += reclen + dlen;
ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused);
ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t)));
}
itx_t *
zil_itx_create(uint64_t txtype, size_t olrsize)
{
size_t itxsize, lrsize;
itx_t *itx;
lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
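/*
 * Worked example (illustrative): for olrsize = 52 the record length is
 * rounded up to lrsize = 56 (the next multiple of 8) and the four
 * trailing pad bytes are zeroed by the memset() above; itxsize
 * additionally covers the itx_t fields preceding itx_lr.
 */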
static itx_t *
zil_itx_clone(itx_t *oitx)
{
itx_t *itx = zio_data_buf_alloc(oitx->itx_size);
memcpy(itx, oitx, oitx->itx_size);
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
return (itx);
}
void
zil_itx_destroy(itx_t *itx)
{
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(void *arg)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itxs_t *itxs = arg;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_remove_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
* zil_lwb_assign(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_remove_head(list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (TREE_CMP(o1, o2));
}
/*
* Remove all async itx with the given oid.
*/
void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
ian = avl_find(t, &oid, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_remove_head(&clean_list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
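/*
 * Illustrative producer-side sketch (not part of this change; the
 * record type is just an example):
 *
 *	itx_t *itx = zil_itx_create(TX_CREATE, sizeof (lr_create_t));
 *	lr_create_t *lr = (lr_create_t *)&itx->itx_lr;
 *	... fill in the record body ...
 *	zil_itx_assign(zilog, itx, tx);
 *
 * Ownership of the itx passes to the ZIL here; it is later freed via
 * zil_itx_destroy() from either the commit or the clean path.
 */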
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. the txg has been committed) so that
 * we don't inadvertently clean out in-memory log records that would be required
* by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
 * Preferably start a task queue to free up the old itxs, but
 * if taskq_dispatch() can't allocate the resources to do that,
 * then free them in-line. This should be rare. Note that using
 * TQ_SLEEP created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static uint64_t
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg, wtxg = 0;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
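/*
* Only TXG_CONCURRENT_STATES txgs can be in flight at once, so this
* scan visits every itxg slot that could hold uncommitted itxs.
*/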
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the ZIL had better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock, which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit them to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_t *sync_list = &itxg->itxg_itxs->i_sync_list;
if (unlikely(zilog->zl_suspend > 0)) {
/*
* ZIL was just suspended, but we lost the race.
* Allow all earlier itxs to be committed, but ask the
* caller to do txg_wait_synced(txg) for any new ones.
*/
if (!list_is_empty(sync_list))
wtxg = MAX(wtxg, txg);
} else {
list_move_tail(commit_list, sync_list);
}
mutex_exit(&itxg->itxg_lock);
}
return (wtxg);
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
ian = avl_find(t, &foid, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL ||
last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itxs for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
- itx->itx_private = NULL;
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
* Since zio_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread were to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwbs in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb, *plwb;
itx_t *itx;
boolean_t first = B_TRUE;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_is_empty(&zilog->zl_itx_commit_list))
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
/*
* Activate SPA_FEATURE_ZILSAXATTR for the cases where the ZIL
* will have already been created (zl_lwb_list not empty).
*/
zil_commit_activate_saxattr_feature(zilog);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
- first = (lwb->lwb_state != LWB_STATE_OPENED) &&
+ ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
+ lwb->lwb_state == LWB_STATE_OPENED);
+ first = (lwb->lwb_state == LWB_STATE_NEW) &&
((plwb = list_prev(&zilog->zl_lwb_list, lwb)) == NULL ||
plwb->lwb_state == LWB_STATE_FLUSH_DONE);
}
while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itxs
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list; it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
* itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
* To prevent the above scenario from occurring, and to ensure
* that once an lwb is OPENED it will transition to ISSUED
* and eventually DONE, we always commit TX_COMMIT itxs to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
if (lwb == NULL) {
list_insert_tail(&nolwb_itxs, itx);
} else if ((zcw->zcw_lwb != NULL &&
zcw->zcw_lwb != lwb) || zcw->zcw_done) {
/*
* Our lwb is done; leave the rest of the
* itx list to somebody else who cares.
*/
first = B_FALSE;
break;
}
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
while ((lwb = list_remove_head(ilwbs)) != NULL)
zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() as we
* normally would.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
zil_commit_waiter_skip(zcw);
/*
* And finally, we have to destroy the itxs that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
zil_itx_destroy(itx);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
+ ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
+ lwb->lwb_state == LWB_STATE_OPENED);
/*
* At this point, the ZIL block pointed at by the "lwb"
- * variable is in one of the following states: "closed"
- * or "open".
+ * variable is in the "new" or "opened" state.
*
- * If it's "closed", then no itxs have been committed to
- * it, so there's no point in issuing its zio (i.e. it's
- * "empty").
+ * If it's "new", then no itxs have been committed to it, so
+ * there's no point in issuing its zio (i.e. it's "empty").
*
- * If it's "open", then it contains one or more itxs that
+ * If it's "opened", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
- * 1. Ideally, there will be more ZIL activity occurring
- * on the system, such that this function will be
- * immediately called again (not necessarily by the same
- * thread) and this lwb's zio will be issued via
- * zil_lwb_assign(). This way, the lwb is guaranteed to
- * be "full" when it is issued to disk, and we'll make
- * use of the lwb's size the best we can.
+ * 1. Ideally, there will be more ZIL activity occurring on
+ * the system, such that this function will be immediately
+ * called again by a different thread and this lwb will be
+ * closed by zil_lwb_assign(). This way, the lwb will be
+ * "full" when it is issued to disk, and we'll make use of
+ * the lwb's size the best we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
- * the system, such that this lwb's zio isn't issued via
- * zil_lwb_assign(), zil_commit_waiter() will issue the
- * lwb's zio. If this occurs, the lwb is not guaranteed
+ * the system, zil_commit_waiter() will close it and issue
+ * the zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, which means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*
* If we had no already running or open LWBs, it may be that
* the workload is single-threaded. And if the ZIL write
* latency is very small, or if the LWB is almost full, it
* may be cheaper to bypass the delay.
*/
if (lwb->lwb_state == LWB_STATE_OPENED && first) {
hrtime_t sleep = zilog->zl_last_lwb_latency *
zfs_commit_timeout_pct / 100;
if (sleep < zil_min_commit_timeout ||
- lwb->lwb_sz - lwb->lwb_nused < lwb->lwb_sz / 8) {
- lwb = zil_lwb_write_close(zilog, lwb, ilwbs);
+ lwb->lwb_nmax - lwb->lwb_nused <
+ lwb->lwb_nmax / 8) {
+ list_insert_tail(ilwbs, lwb);
+ lwb = zil_lwb_write_close(zilog, lwb,
+ LWB_STATE_NEW);
zilog->zl_cur_used = 0;
if (lwb == NULL) {
while ((lwb = list_remove_head(ilwbs))
!= NULL)
zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
}
}
}
}
}
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is that the passed in waiter's
* commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static uint64_t
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
list_t ilwbs;
lwb_t *lwb;
uint64_t wtxg = 0;
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce CPU spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zilog, zil_commit_writer_count);
wtxg = zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog, zcw, &ilwbs);
out:
mutex_exit(&zilog->zl_issuer_lock);
while ((lwb = list_remove_head(&ilwbs)) != NULL)
zil_lwb_write_issue(zilog, lwb);
list_destroy(&ilwbs);
return (wtxg);
}
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
+ ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
- if (lwb->lwb_state == LWB_STATE_ISSUED ||
- lwb->lwb_state == LWB_STATE_WRITE_DONE ||
- lwb->lwb_state == LWB_STATE_FLUSH_DONE)
+ if (lwb->lwb_state != LWB_STATE_OPENED)
return;
/*
* In order to call zil_lwb_write_close() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
if (zcw->zcw_done) {
- lwb = NULL;
- goto out;
+ mutex_exit(&zilog->zl_issuer_lock);
+ return;
}
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
- * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
- * _can_ transition from ISSUED to DONE, but it's OK to race with
+ * from OPENED to CLOSED while we hold the zl_issuer_lock. The lwb
+ * _can_ transition from CLOSED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
- * the ISSUED or DONE states.
+ * the CLOSED, ISSUED or DONE states.
*
* The important thing is that we treat the lwb differently depending on
- * if it's ISSUED or OPENED, and block any other threads that might
- * attempt to issue this lwb. For that reason we hold the
+ * if it's OPENED or CLOSED, and block any other threads that might
+ * attempt to close/issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
- * zil_lwb_write_close() if the lwb had already been issued.
+ * zil_lwb_write_close() if the lwb had already been closed/issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
- if (lwb->lwb_state == LWB_STATE_ISSUED ||
- lwb->lwb_state == LWB_STATE_WRITE_DONE ||
- lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
- lwb = NULL;
- goto out;
+ if (lwb->lwb_state != LWB_STATE_OPENED) {
+ mutex_exit(&zilog->zl_issuer_lock);
+ return;
}
- ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
+ /*
+ * We do not need zcw_lock once we hold zl_issuer_lock and know the lwb
+ * is still open. But we have to drop it to avoid a deadlock in case the
+ * callback of a zio issued by zil_lwb_write_issue() tries to take it,
+ * while zil_lwb_write_issue() itself is blocked trying to issue the
+ * next lwb it found in the LWB_STATE_READY state.
+ */
+ mutex_exit(&zcw->zcw_lock);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
- lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, NULL);
+ lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
+ ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* By having to issue the lwb's zio here, it means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
* When zil_lwb_write_close() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
- *
- * We must drop the commit waiter's lock prior to
- * calling zil_commit_writer_stall() or else we can wind
- * up with the following deadlock:
- *
- * - This thread is waiting for the txg to sync while
- * holding the waiter's lock; txg_wait_synced() is
- * used within txg_commit_writer_stall().
- *
- * - The txg can't sync because it is waiting for this
- * lwb's zio callback to call dmu_tx_commit().
- *
- * - The lwb's zio callback can't call dmu_tx_commit()
- * because it's blocked trying to acquire the waiter's
- * lock, which occurs prior to calling dmu_tx_commit()
*/
- mutex_exit(&zcw->zcw_lock);
zil_lwb_write_issue(zilog, lwb);
- lwb = NULL;
zil_commit_writer_stall(zilog);
- mutex_enter(&zcw->zcw_lock);
- }
-
-out:
- mutex_exit(&zilog->zl_issuer_lock);
- if (lwb)
+ mutex_exit(&zilog->zl_issuer_lock);
+ } else {
+ mutex_exit(&zilog->zl_issuer_lock);
zil_lwb_write_issue(zilog, lwb);
- ASSERT(MUTEX_HELD(&zcw->zcw_lock));
+ }
+ mutex_enter(&zcw->zcw_lock);
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
* the ZIL, the lwb's zio will be issued via the zil_lwb_assign()
* function. If this does not occur, this secondary responsibility will
* ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
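/*
* For example, with zfs_commit_timeout_pct at its usual default of 5
* and a last lwb latency of 1 ms, the waiter sleeps roughly 50 us
* before force-issuing the still-open lwb itself.
*/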
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where its "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so its "zcw_done" field is still B_FALSE.
*/
- IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
+ IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_NEW);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
int rc = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
if (rc != -1 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
+ lwb->lwb_state == LWB_STATE_CLOSED ||
+ lwb->lwb_state == LWB_STATE_READY ||
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once an lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called, are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
* simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(), such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit() a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, any
* lwb cannot be considered committed to stable storage until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
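/*
* As a concrete example, an fsync(2) on ZFS typically reaches this
* function as zil_commit(zilog, zp->z_id), blocking until the itxs
* described by the "foid" semantics above are on stable storage.
*/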
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified; thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there's
* checks in the code that enforce this invariant, and will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_write(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zilog, zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying its
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done" and signaled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
uint64_t wtxg = zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fall back to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but the expectation is for this to be
* an exceptional case, and shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
} else if (wtxg != 0) {
txg_wait_synced(zilog->zl_dmu_pool, wtxg);
}
zil_free_commit_waiter(zcw);
}
/*
* Called in syncing context to free committed log blocks and update the log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
zil_lwb_flush_wait_all(zilog, txg);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
memset(zh, 0, sizeof (zil_header_t));
memset(zilog->zl_replayed_seq, 0,
sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of a log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
} else {
/*
* A destroyed ZIL chain can't contain any TX_SETSAXATTR
* records. So, deactivate the feature for this dataset.
* We activate it again when we start a new ZIL chain.
*/
if (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_ZILSAXATTR))
dsl_dataset_deactivate_feature(ds,
SPA_FEATURE_ZILSAXATTR, tx);
}
}
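/*
* Free lwbs whose writes and flushes have completed and whose blocks
* were allocated and last referenced in already-synced txgs, moving
* the on-disk log head (zh_log) forward as we go.
*/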
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_state != LWB_STATE_FLUSH_DONE ||
- lwb->lwb_max_txg > txg)
+ lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
- zio_free(spa, txg, &lwb->lwb_blk);
+ if (!BP_IS_HOLE(&lwb->lwb_blk))
+ zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_is_empty(&zilog->zl_lwb_list))
BP_ZERO(&zh->zh_log);
}
- /*
- * Remove fastwrite on any blocks that have been pre-allocated for
- * the next commit. This prevents fastwrite counter pollution by
- * unused, long-lived LWBs.
- */
- for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
- if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) {
- metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
- lwb->lwb_fastwrite = 0;
- }
- }
-
mutex_exit(&zilog->zl_lock);
}
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
static void
zil_lwb_dest(void *vbuf, void *unused)
{
(void) unused;
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_sums_init(&zil_sums_global);
zil_kstats_global = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_kstats_global != NULL) {
zil_kstats_global->ks_data = &zil_stats;
zil_kstats_global->ks_update = zil_kstats_global_update;
zil_kstats_global->ks_private = NULL;
kstat_install(zil_kstats_global);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_kstats_global != NULL) {
kstat_delete(zil_kstats_global);
zil_kstats_global = NULL;
}
zil_sums_fini(&zil_sums_global);
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
zilog->zl_max_block_size = zil_maxblocksize;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
mutex_destroy(&zilog->zl_lwb_io_lock);
cv_destroy(&zilog->zl_cv_suspend);
cv_destroy(&zilog->zl_lwb_io_cv);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
zilog->zl_sums = zil_sums;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
+ txg = zilog->zl_dirty_max_txg;
lwb = list_tail(&zilog->zl_lwb_list);
- if (lwb == NULL)
- txg = zilog->zl_dirty_max_txg;
- else
- txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
+ if (lwb != NULL) {
+ txg = MAX(txg, lwb->lwb_alloc_txg);
+ txg = MAX(txg, lwb->lwb_max_txg);
+ }
mutex_exit(&zilog->zl_lock);
/*
* zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends
* on the time when the dmu_tx transaction is assigned in
- * zil_lwb_write_close().
+ * zil_lwb_write_issue().
*/
mutex_enter(&zilog->zl_lwb_io_lock);
txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
mutex_exit(&zilog->zl_lwb_io_lock);
/*
* We need to use txg_wait_synced() to wait until that txg is synced.
* zil_sync() will guarantee all lwbs up to that txg have been
* written out, flushed, and cleaned.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
(u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_remove_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
-
- if (lwb->lwb_fastwrite)
- metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
-
+ ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static const char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* If cookiep == NULL, this does both the suspend & resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted &&
dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EACCES));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
- * LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed
+ * LWB_STATE_OPENED, _CLOSED and _READY lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwbs to be
* LWB_STATE_FLUSH_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwbs are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted)
dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
typedef struct zil_replay_arg {
zil_replay_func_t *const *zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
memcpy(zr->zr_lr, lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error, we try syncing out
* any removes and then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
(void) bp, (void) arg, (void) claim_txg;
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
* Return B_TRUE if there were any entries to replay.
*/
boolean_t
zil_replay(objset_t *os, void *arg,
zil_replay_func_t *const replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
return (zil_destroy(zilog, B_TRUE));
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
return (B_TRUE);
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
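/*
* Note: reporting "replaying" when sync is disabled causes the
* zfs_log_*() callers to skip itx creation entirely, which is how
* sync=disabled avoids ZIL overhead.
*/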
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
int
zil_reset(const char *osname, void *arg)
{
(void) arg;
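/*
* With a NULL cookie, zil_suspend() performs both the suspend and
* the resume, leaving the dataset with an empty intent log.
*/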
int error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
EXPORT_SYMBOL(zil_sums_init);
EXPORT_SYMBOL(zil_sums_fini);
EXPORT_SYMBOL(zil_kstat_values_update);
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
"ZIL block open timeout percentage");
ZFS_MODULE_PARAM(zfs_zil, zil_, min_commit_timeout, U64, ZMOD_RW,
"Minimum delay we care for ZIL block commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
"Disable intent logging replay");
ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
"Disable ZIL cache flushes");
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
"Limit in bytes slog sync writes per commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
"Limit in bytes of ZIL log block size");
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 10279fde89df..3b3b40fa73d8 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -1,5168 +1,5169 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, Datto, Inc.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *const zio_type_name[ZIO_TYPES] = {
/*
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream OpenZFS.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
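/*
 * Worked example for BP_SPANB(): with 128K indirect blocks
 * (indblkshift == 17) and 128-byte block pointers (SPA_BLKPTRSHIFT == 7),
 * each indirect block holds 1 << (17 - 7) == 1024 pointers, so
 * BP_SPANB(17, level) == 1024^level leaf blocks are spanned per pointer
 * at the given level.
 */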
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*
* Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
* compression (including of metadata). In practice, we don't have this
* many sync passes, so this has no effect.
*
* The original intent was that disabling compression would help the sync
* passes to converge. However, in practice disabling compression increases
* the average number of sync passes, because when we turn compression off,
* many blocks' sizes will change and thus we have to re-allocate (not
* overwrite) them. It also increases the number of 128KB allocations (e.g.
* for indirect blocks and spacemaps) because these will not be compressed.
* The 128K allocations are especially detrimental to performance on highly
* fragmented systems, which may have very few free segments of this size,
* and may need to load new metaslabs to satisfy 128K allocations.
*/
/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;
/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;
/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
/*
* Keep crash-dump cores smaller by excluding metadata
* allocations as well.
*/
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
void
zio_init(void)
{
size_t c;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE. For larger buffers, we want a cache
* for each quarter-power of 2.
*/
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t p2 = size;
size_t align = 0;
size_t data_cflags, cflags;
data_cflags = KMC_NODEBUG;
cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
KMC_NODEBUG : 0;
while (!ISP2(p2))
p2 &= p2 - 1;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
/*
* Here's the problem - on 4K native devices in userland on
* Linux using O_DIRECT, buffers must be 4K aligned or I/O
* will fail with EINVAL, causing zdb (and others) to coredump.
* Since userland probably doesn't need optimized buffer caches,
* we just force 4K alignment on everything.
*/
align = 8 * SPA_MINBLOCKSIZE;
#else
if (size < PAGESIZE) {
align = SPA_MINBLOCKSIZE;
} else if (IS_P2ALIGNED(size, p2 >> 2)) {
align = PAGESIZE;
}
#endif
if (align != 0) {
char name[36];
if (cflags == data_cflags) {
/*
* Resulting kmem caches would be identical.
* Save memory by creating only one.
*/
(void) snprintf(name, sizeof (name),
"zio_buf_comb_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name,
size, align, NULL, NULL, NULL, NULL, NULL,
cflags);
zio_data_buf_cache[c] = zio_buf_cache[c];
continue;
}
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, data_cflags);
}
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
void
zio_fini(void)
{
size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
for (size_t i = 0; i < n; i++) {
if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((i + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[i],
(long long unsigned)zio_buf_cache_frees[i]);
}
#endif
/*
* The same kmem cache can show up multiple times in both zio_buf_cache
* and zio_data_buf_cache. Do a wasteful but trivially correct scan to
* sort it out.
*/
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_buf_cache[j])
zio_buf_cache[j] = NULL;
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_data_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
VERIFY3P(zio_buf_cache[i], ==, NULL);
VERIFY3P(zio_data_buf_cache[i], ==, NULL);
}
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
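/*
 * Worked example of the size-to-cache mapping above, assuming
 * SPA_MINBLOCKSHIFT == 9 (SPA_MINBLOCKSIZE == 512):
 *
 *	zio_buf_alloc(512)  -> c = (512 - 1)  >> 9 = 0  (512-byte cache)
 *	zio_buf_alloc(513)  -> c = (513 - 1)  >> 9 = 1  (1024-byte cache)
 *	zio_buf_alloc(4096) -> c = (4096 - 1) >> 9 = 7  (4096-byte cache)
 *
 * i.e. a request is served by the smallest cache whose buffer size,
 * (c + 1) << SPA_MINBLOCKSHIFT, is >= the requested size.
 */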
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists to limit the amount of ZFS
* data that shows up in a kernel crashdump, thus reducing the amount of
* kernel heap dumped to disk when the kernel panics.
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
kmem_cache_free(zio_data_buf_cache[c], buf);
}
static void
zio_abd_free(void *abd, size_t size)
{
(void) size;
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
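/*
 * Usage note: transforms form a LIFO stack. For example, zio_read_bp_init()
 * below pushes zio_decompress (and possibly zio_decrypt) with a freshly
 * allocated abd; zio_pop_transforms() then runs each transform against the
 * original abd/size and frees the temporary buffers in reverse order of
 * being pushed.
 */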
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
void *tmp = abd_borrow_buf(data, size);
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, tmp, zio->io_size, size,
&zio->io_prop.zp_complevel);
abd_return_buf_copy(data, tmp, size);
if (zio_injection_enabled && ret == 0)
ret = zio_handle_fault_injection(zio, EINVAL);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
tmp = zio_buf_alloc(lsize);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, tmp, zio->io_size, lsize,
&zio->io_prop.zp_complevel);
if (ret != 0) {
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
zio_buf_free(tmp, lsize);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but when this was done,
* we had run out of bits in what is now zio_flag_t. Future cleanup
* could make this a flag bit.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
if (zio_injection_enabled && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
zio->io_abd, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, &zio->io_bookmark,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
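/*
 * A sketch of the walker idiom used throughout this file: the zio_link_t
 * cursor must start out NULL and is advanced by each call; the walk is
 * over when NULL is returned. The helper below is illustrative only.
 */
static void
example_walk_parents(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio;

	while ((pio = zio_walk_parents(cio, &zl)) != NULL) {
		/* inspect pio here */
	}
}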
void
zio_add_child(zio_t *pio, zio_t *cio)
{
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
countp[w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
zl->zl_parent = pio;
zl->zl_child = cio;
ASSERT(list_is_empty(&cio->io_parent_list));
list_insert_head(&cio->io_parent_list, zl);
mutex_enter(&pio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
countp[w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
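/*
 * Pipeline stages use zio_wait_for_children() as a gate; e.g.
 * zio_write_compress() below does:
 *
 *	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
 *	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY))
 *		return (NULL);
 *
 * The zio->io_stage >>= 1 above rewinds to the previous stage, so the
 * current stage re-executes once zio_notify_parent() clears the stall.
 */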
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* If we can tell the caller to execute this parent next, do
* so. We only do this if the parent's zio type matches the
* child's type. Otherwise dispatch the parent zio in its
* own taskq.
*
* Having the caller execute the parent when possible reduces
* locking on the zio taskqs, reduces context switch
* overhead, and has no recursion penalty. Note that one
* read from disk typically causes at least 3 zios: a
* zio_null(), the logical zio_read(), and then a physical
* zio. When the physical ZIO completes, we are able to call
* zio_done() on all 3 of these zios from one invocation of
* zio_execute() by returning the parent back to
* zio_execute(). Since the parent isn't executed until this
* thread returns back to zio_execute(), the caller should do
* so promptly.
*
* In other cases, dispatching the parent prevents
* overflowing the stack when we have deeply nested
* parent-child relationships, as we do with the "mega zio"
* of writes for spa_sync(), and the chain of ZIL blocks.
*/
if (next_to_executep != NULL && *next_to_executep == NULL &&
pio->io_type == zio->io_type) {
*next_to_executep = pio;
} else {
zio_taskq_dispatch(pio, type, B_FALSE);
}
} else {
mutex_exit(&pio->io_lock);
}
}
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
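/*
 * zio_bookmark_compare() is suitable as an AVL comparator; a sketch of
 * its use (the tree and the io_alloc_node linkage are illustrative):
 *
 *	avl_create(&tree, zio_bookmark_compare,
 *	    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
 *
 * The final pointer comparison makes the ordering total, so distinct
 * zios with identical bookmarks never compare equal.
 */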
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
zio_flag_t flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
memset(zio, 0, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT) {
zio->io_bp_copy = *bp;
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
} else {
zio->io_bp = (blkptr_t *)bp;
}
zio->io_bp_orig = *bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child_first(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, zio_flag_t flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
return (zio_null(NULL, spa, NULL, done, private, flags));
}
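/*
 * A minimal sketch of the usual root-zio pattern: a root (null) zio
 * gathers a set of child I/Os so the caller can wait for all of them at
 * once. The helper and its arguments are hypothetical; error handling
 * is elided.
 */
static int
example_read_many(spa_t *spa, const blkptr_t *bps, abd_t **abds, int n,
    const zbookmark_phys_t *zbs)
{
	zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	for (int i = 0; i < n; i++) {
		zio_nowait(zio_read(root, spa, &bps[i], abds[i],
		    BP_GET_PSIZE(&bps[i]), NULL, NULL,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zbs[i]));
	}
	return (zio_wait(root));	/* waits for all children */
}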
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
enum blk_verify_flag blk_verify, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("bad blkptr at %px: "
"DVA[0]=%#llx/%#llx "
"DVA[1]=%#llx/%#llx "
"DVA[2]=%#llx/%#llx "
"prop=%#llx "
"pad=%#llx,%#llx "
"phys_birth=%#llx "
"birth=%#llx "
"fill=%#llx "
"cksum=%#llx/%#llx/%#llx/%#llx",
bp,
(long long)bp->blk_dva[0].dva_word[0],
(long long)bp->blk_dva[0].dva_word[1],
(long long)bp->blk_dva[1].dva_word[0],
(long long)bp->blk_dva[1].dva_word[1],
(long long)bp->blk_dva[2].dva_word[0],
(long long)bp->blk_dva[2].dva_word[1],
(long long)bp->blk_prop,
(long long)bp->blk_pad[0],
(long long)bp->blk_pad[1],
(long long)bp->blk_phys_birth,
(long long)bp->blk_birth,
(long long)bp->blk_fill,
(long long)bp->blk_cksum.zc_word[0],
(long long)bp->blk_cksum.zc_word[1],
(long long)bp->blk_cksum.zc_word[2],
(long long)bp->blk_cksum.zc_word[3]);
switch (blk_verify) {
case BLK_VERIFY_HALT:
zfs_panic_recover("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_LOG:
zfs_dbgmsg("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_ONLY:
break;
}
return (1);
}
/*
* Verify the block pointer fields contain reasonable values. This means
* it only contains known object types, checksum/compression identifiers,
* block sizes within the maximum allowed limits, valid DVAs, etc.
*
* If everything checks out, B_TRUE is returned. The blk_verify
* argument controls the behavior when an invalid field is detected.
*
* Values for blk_verify_flag:
* BLK_VERIFY_ONLY: evaluate the block
* BLK_VERIFY_LOG: evaluate the block and log problems
* BLK_VERIFY_HALT: call zfs_panic_recover on error
*
* Values for blk_config_flag:
* BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
* BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
* obtained for reader
* BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
* performance
*/
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
int errors = 0;
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Do not verify individual DVAs if the config is not trusted. This
* will be done once the zio is executed in vdev_mirror_map_alloc.
*/
if (!spa->spa_trust_config)
return (errors == 0);
switch (blk_config) {
case BLK_CONFIG_HELD:
ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
break;
case BLK_CONFIG_NEEDED:
spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
break;
case BLK_CONFIG_SKIP:
return (errors == 0);
default:
panic("invalid blk_config %u", blk_config);
}
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
const dva_t *dva = &bp->blk_dva[i];
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_hole_ops) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has hole VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
if (blk_config == BLK_CONFIG_NEEDED)
spa_config_exit(spa, SCL_VDEV, bp);
return (errors == 0);
}
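/*
 * Callers combine the two flag sets to match their locking context and
 * desired strictness; e.g. zio_free() below passes
 * (BLK_CONFIG_NEEDED, BLK_VERIFY_HALT), while zio_claim() passes
 * BLK_CONFIG_HELD when the caller already holds the config lock as
 * writer.
 */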
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
(void) bp;
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
return (B_FALSE);
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL)
return (B_FALSE);
if (vd->vdev_ops == &vdev_hole_ops)
return (B_FALSE);
if (vd->vdev_ops == &vdev_missing_ops) {
return (B_FALSE);
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize)
return (B_FALSE);
return (B_TRUE);
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa));
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
boolean_t brtwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
ASSERT(!brtwrite || !nopwrite);
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync() keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_brtwrite = brtwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Frees that are for the currently-syncing txg, are not going to be
* deferred, and which will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*
* Note that we only defer frees after zfs_sync_pass_deferred_free
* when the log space map feature is disabled. [see relevant comment
* in spa_sync_iterate_to_convergence()]
*/
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
brt_maybe_exists(spa, bp)) {
metaslab_check_free(spa, bp);
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
}
}
/*
* To improve performance, this function may return NULL if we were able
* to do the free immediately. This avoids the cost of creating a zio
* (and linking it to the parent, etc).
*/
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_flag_t flags)
{
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
if (BP_IS_EMBEDDED(bp))
return (NULL);
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
brt_maybe_exists(spa, bp)) {
/*
* GANG, DEDUP and BRT blocks can induce a read (for the gang
* block header, the DDT or the BRT), so issue them
* asynchronously so that this thread is not tied up.
*/
enum zio_stage stage =
ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL,
ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
} else {
metaslab_free(spa, bp, txg, B_FALSE);
return (NULL);
}
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
spa_min_claim_txg(spa));
ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
int c;
if (vd->vdev_children == 0) {
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
zio->io_cmd = cmd;
} else {
zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
for (c = 0; c < vd->vdev_children; c++)
zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
done, private, flags));
}
return (zio);
}
zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, enum trim_flag trim_flags)
{
zio_t *zio;
ASSERT0(vd->vdev_children);
ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
ASSERT3U(size, !=, 0);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
zio->io_trim_flags = trim_flags;
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
zio_flag_t flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*
* vdev child I/Os do not propagate their error to the parent.
* Therefore, for correct operation the caller *must* check for
* and handle the error in the child i/o's done callback.
* The only exceptions are i/os that we don't care about
* (OPTIONAL or REPAIR).
*/
ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
done != NULL);
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
if (vd->vdev_ops->vdev_op_leaf) {
ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
}
flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERT(pio->io_metaslab_class != NULL);
ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
return (zio);
}
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
zio_type_t type, zio_priority_t priority, zio_flag_t flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
void
zio_flush(zio_t *zio, vdev_t *vd)
{
zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
+/*
+ * Round the provided allocation size up to a value that can be allocated
+ * by at least some vdev(s) in the pool with minimal or no additional
+ * padding, and without extra space usage on others.
+ */
+static uint64_t
+zio_roundup_alloc_size(spa_t *spa, uint64_t size)
+{
+ if (size > spa->spa_min_alloc)
+ return (roundup(size, spa->spa_gcd_alloc));
+ return (spa->spa_min_alloc);
+}
+
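/*
 * Worked example (values illustrative): in a pool whose vdevs have
 * minimum allocation sizes of 4K and 8K, spa_min_alloc == 4096 and
 * spa_gcd_alloc == gcd(4096, 8192) == 4096, so:
 *
 *	zio_roundup_alloc_size(spa, 3000) -> 4096  (below the minimum)
 *	zio_roundup_alloc_size(spa, 5000) -> 8192  (rounded up to the gcd)
 */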
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
}
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (zio);
}
static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(bp->blk_birth != zio->io_txg);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zp->zp_brtwrite)
return (zio);
ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
if (BP_IS_EMBEDDED(bp))
return (zio);
/*
* If we've been overridden and nopwrite is set then
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (zio);
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (zio);
}
static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
uint32_t pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
return (zio);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
- ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
- spa_max_replication(spa)) == BP_GET_NDVAS(bp));
+ ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
+ MIN(zp->zp_copies, spa_max_replication(spa))
+ == BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
void *cbuf = NULL;
psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
zp->zp_complevel);
if (psize == 0) {
compress = ZIO_COMPRESS_OFF;
} else if (psize >= lsize) {
compress = ZIO_COMPRESS_OFF;
if (cbuf != NULL)
zio_buf_free(cbuf, lsize);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
zio_buf_free(cbuf, lsize);
bp->blk_birth = zio->io_txg;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (zio);
} else {
/*
* Round compressed size up to the minimum allocation
* size of the smallest-ashift device, and zero the
* tail. This ensures that the compressed size of the
* BP (and thus the compressratio property) is correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
- ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT);
- size_t rounded = (size_t)roundup(psize,
- spa->spa_min_alloc);
+ size_t rounded = (size_t)zio_roundup_alloc_size(spa,
+ psize);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
psize = lsize;
} else {
abd_t *cdata = abd_get_from_buf(cbuf, lsize);
abd_take_ownership_of_buf(cdata, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
zio->io_abd, NULL, lsize, zp->zp_complevel);
if (psize == 0 || psize >= lsize)
compress = ZIO_COMPRESS_OFF;
} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
!(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
/*
* If we are raw receiving an encrypted dataset we should not
* take this codepath because it will change the on-disk block
* and decryption will fail.
*/
- size_t rounded = MIN((size_t)roundup(psize,
- spa->spa_min_alloc), lsize);
+ size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
+ lsize);
if (rounded != psize) {
abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, rounded, NULL);
}
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (zio->io_bp_orig.blk_birth != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (zio);
}
static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
return (zio);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available.
*/
if ((zio->io_priority == ZIO_PRIORITY_NOW ||
zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
&zio->io_tqent);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
spa_t *spa = zio->io_spa;
taskq_t *tq = taskq_of_curthread();
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (tqs->stqs_taskq[i] == tq)
return (B_TRUE);
}
}
return (B_FALSE);
}
static zio_t *
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
void
zio_interrupt(void *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this IO, thus jump to the end of this function and "skip" the
* delay, issuing it directly to the zio layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
clock_t expire_at_tick = ddi_get_lbolt() +
NSEC_TO_TICK(diff);
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
if (NSEC_TO_TICK(diff) == 0) {
/* Our delay is less than a jiffy - just spin */
zfs_sleep_until(zio->io_target_timestamp);
zio_interrupt(zio);
} else {
/*
* Use taskq_dispatch_delay() in the place of
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
zio_interrupt, zio, TQ_NOSLEEP,
expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just
* finish the zio without a delay.
*/
zio_interrupt(zio);
}
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
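/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * the kernel path above converts the remaining delay into scheduler
 * ticks; any delay shorter than one tick truncates to zero and is
 * handled by spinning instead of dispatching a delayed task. With a
 * hypothetical 1000 Hz tick rate:
 */
static inline long
nsec_to_tick_sketch(long long nsec, long hz)
{
	/* One tick spans (10^9 / hz) nanoseconds; truncate toward zero. */
	return ((long)(nsec / (1000000000LL / hz)));
}
/* nsec_to_tick_sketch(500000, 1000) == 0, so the code above spins. */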
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s "
"last=%llu type=%d "
"priority=%d flags=0x%llx stage=0x%x "
"pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu "
"level=%llu blkid=%llu "
"offset=%llu size=%llu "
"error=%d",
ziodepth, pio, pio->io_timestamp,
(u_longlong_t)delta, pio->io_delta, pio->io_delay,
vd ? vd->vdev_path : "NULL",
vq ? vq->vq_io_complete_ts : 0, pio->io_type,
pio->io_priority, (u_longlong_t)pio->io_flags,
pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
(u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio, ziodepth + 1);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface then post deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, const char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio, 0);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(void *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine whether the stack in the current context is large
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
#else
(void) zio;
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
/*
* The zio pipeline stage returns the next zio to execute
* (typically the same as this one), or NULL if we should
* stop.
*/
zio = zio_pipeline[highbit64(stage) - 1](zio);
if (zio == NULL)
return;
}
}
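/*
 * Illustrative sketch (hypothetical values, not part of this file):
 * the stage-advance loop above treats io_pipeline as a bitmask of
 * power-of-two stages and shifts the current stage left until it
 * lands on the next stage present in this zio's pipeline:
 */
static inline unsigned int
next_stage_sketch(unsigned int stage, unsigned int pipeline)
{
	/* Advance to the next set bit at or above stage << 1. */
	do {
		stage <<= 1;
	} while ((stage & pipeline) == 0);
	return (stage);
}
/*
 * e.g. with pipeline 0x1 | 0x8 | 0x20, next_stage_sketch(0x1, ...)
 * returns 0x8, and next_stage_sketch(0x8, ...) returns 0x20.
 */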
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
/*
* Some routines, like zio_free_sync(), may return a NULL zio
* to avoid the performance overhead of creating and then destroying
* an unneeded zio. For the callers' simplicity, we accept a NULL
* zio and ignore it.
*/
if (zio == NULL)
return (0);
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
void
zio_nowait(zio_t *zio)
{
/*
* See comment in zio_wait().
*/
if (zio == NULL)
return;
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
list_is_empty(&zio->io_parent_list)) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_root_zio "Godfather" I/O which
* will ensure it completes prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
}
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(void *arg)
{
zio_t *pio = arg;
zio_t *cio, *cio_next;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0;
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zio_link_t *zl = NULL;
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
"failure and has been suspended.\n", spa_name(spa));
(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
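/*
 * Illustrative sketch (hypothetical type, not part of this file): the
 * in-core gang tree described above is a simple recursive structure.
 * Each node holds one gang header plus a child slot per header blkptr;
 * plain data leaves have no node at all (their slot stays NULL):
 */
struct gang_node_sketch {
	void *gbh;				/* one-sector gang header */
	struct gang_node_sketch *child[3];	/* SPA_GBH_NBLKPTRS == 3 */
};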
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_free(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_free(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
zio = zio_null(pio, pio->io_spa,
NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
}
return (zio);
}
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(list_is_empty(&zio->io_child_list));
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_free(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (zio);
}
static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
zio_t *gio __maybe_unused = zio->io_gang_leader;
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
/*
* The io_abd field will be NULL for a zio with no data. The io_flags
* will initially have ZIO_FLAG_NODATA set, but we can't
* check for it here as it is cleared in zio_ready.
*/
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
spa_t *spa = pio->io_spa;
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
zio_prop_t zp;
int error;
boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
/*
* If one copy was requested, store 2 copies of the GBH, so that we
* can still traverse all the data (e.g. to free or scrub) even if a
* block is damaged. Note that we can't store 3 copies of the GBH in
* all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
*/
int gbh_copies = copies;
if (gbh_copies == 1) {
gbh_copies = MIN(2, spa_max_replication(spa));
}
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
mca_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
return (pio);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
memset(gbh, 0, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
/*
* Create and nowait the gang children.
*/
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_complevel = gio->io_prop.zp_complevel;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
resid) : NULL, lsize, lsize, &zp,
zio_write_gang_member_ready, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
- /*
- * We didn't allocate this bp, so make sure it doesn't get unmarked.
- */
- pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
-
zio_nowait(zio);
return (pio);
}
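/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * the child-sizing loop above splits the remaining residual evenly
 * across the remaining gang slots, rounding each child's size up to
 * SPA_MINBLOCKSIZE, which is what P2ROUNDUP() computes:
 */
static inline unsigned long long
gang_child_size_sketch(unsigned long long resid, int slots_left,
    unsigned long long minblocksize)
{
	/* Round (resid / slots_left) up to a multiple of minblocksize. */
	unsigned long long lsize = resid / slots_left;
	return ((lsize + minblocksize - 1) / minblocksize * minblocksize);
}
/*
 * e.g. splitting a 96K (98304-byte) residual across 3 remaining slots
 * with a 512-byte minimum yields three 32K children:
 * gang_child_size_sketch(98304, 3, 512) == 32768.
 */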
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_IS_HOLE(bp));
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (zio);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
/*
* If we're overwriting a block that is currently on an
* indirect vdev, then ignore the nopwrite request and
* allow a new block to be allocated on a concrete vdev.
*/
spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
vdev_t *tvd = vdev_lookup_top(zio->io_spa,
DVA_GET_VDEV(&bp_orig->blk_dva[d]));
if (tvd->vdev_ops == &vdev_indirect_ops) {
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
return (zio);
}
}
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (zio);
}
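/*
 * Illustrative sketch (not part of this file): the nopwrite hit test
 * above ultimately reduces to comparing the four 64-bit words of the
 * two 256-bit checksums; ZIO_CHECKSUM_EQUAL() expands to roughly the
 * following branch-free comparison:
 */
static inline int
cksum_equal_sketch(const unsigned long long a[4],
    const unsigned long long b[4])
{
	/* A nonzero XOR in any word means the checksums differ. */
	return (((a[0] ^ b[0]) | (a[1] ^ b[1]) |
	    (a[2] ^ b[2]) | (a[3] ^ b[3])) == 0);
}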
/*
* ==========================================================================
* Block Reference Table
* ==========================================================================
*/
static zio_t *
zio_brt_free(zio_t *zio)
{
blkptr_t *bp;
bp = zio->io_bp;
if (BP_GET_LEVEL(bp) > 0 ||
BP_IS_METADATA(bp) ||
!brt_maybe_exists(zio->io_spa, bp)) {
return (zio);
}
if (!brt_entry_decref(zio->io_spa, bp)) {
/*
* This isn't the last reference, so we cannot free
* the data yet.
*/
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
}
return (zio);
}
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddp = ddt_phys_select(dde, bp);
if (zio->io_error == 0)
ddt_phys_clear(ddp); /* this ddp doesn't need repair */
if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
dde->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (ddp_self == NULL)
return (zio);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
&blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (zio);
}
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (zio);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio so in these
* cases we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) {
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
} else if (lio != NULL) {
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
}
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio;
if (zio->io_error)
return;
ddt_enter(ddt);
ASSERT(dde->dde_lead_zio[p] == zio);
ddt_phys_fill(ddp, zio->io_bp);
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
ddt_exit(ddt);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
zio_link_t *zl = NULL;
while (zio_walk_parents(zio, &zl) != NULL)
ddt_phys_addref(ddp);
} else {
ddt_phys_clear(ddp);
}
ddt_exit(ddt);
}
static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
int p = zp->zp_copies;
zio_t *cio = NULL;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_TRUE);
ddp = &dde->dde_phys[p];
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
if (ddp->ddp_phys_birth != 0)
ddt_bp_fill(ddp, bp, txg);
if (dde->dde_lead_zio[p] != NULL)
zio_add_child(zio, dde->dde_lead_zio[p]);
else
ddt_phys_addref(ddp);
} else if (zio->io_bp_override) {
ASSERT(bp->blk_birth == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_fill(ddp, bp);
ddt_phys_addref(ddp);
} else {
cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, zp,
zio_ddt_child_write_ready, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[p] = cio;
}
ddt_exit(ddt);
zio_nowait(cio);
return (zio);
}
static ddt_entry_t *freedde; /* for debugging */
static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
if (dde) {
ddp = ddt_phys_select(dde, bp);
if (ddp)
ddt_phys_decref(ddp);
}
ddt_exit(ddt);
return (zio);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
metaslab_class_t *mc;
/* locate an appropriate allocation class */
mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
zbookmark_phys_t *bm = &zio->io_bookmark;
/*
* We want to try to use as many allocators as possible to help improve
* performance, but we also want logically adjacent IOs to be physically
* adjacent to improve sequential read performance. We chunk each object
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
zio->io_allocator = allocator;
zio->io_metaslab_class = mc;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
nio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
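/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * the allocator selection above hashes (objset, object, level,
 * region), where a region covers 2^20 consecutive block IDs, so
 * logically adjacent blocks land on the same allocator while distinct
 * regions spread across all of them:
 */
static inline int
pick_allocator_sketch(unsigned long long objset, unsigned long long object,
    unsigned long long level, unsigned long long blkid, int alloc_count)
{
	/* cityhash4() is the same hash used by zio_dva_throttle(). */
	return ((int)(cityhash4(objset, object, level, blkid >> 20) %
	    alloc_count));
}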
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
- flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
/*
* if not already chosen, locate an appropriate allocation class
*/
mc = zio->io_metaslab_class;
if (mc == NULL) {
mc = spa_preferred_class(spa, zio->io_size,
zio->io_prop.zp_type, zio->io_prop.zp_level,
zio->io_prop.zp_zpl_smallblk);
zio->io_metaslab_class = mc;
}
/*
* Try allocating the block in the usual metaslab class.
* If that's full, allocate it in the normal class.
* If that's full, allocate as a gang block,
* and if all are full, the allocation fails (which shouldn't happen).
*
* Note that we do not fall back on embedded slog (ZIL) space, to
* preserve unfragmented slog space, which is critical for decent
* sync write performance. If a log allocation fails, we will fall
* back to spa_sync() which is abysmal for performance.
*/
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
/*
* Fall back to the normal class when an alloc class is full
*/
if (error == ENOSPC && mc != spa_normal_class(spa)) {
/*
* If throttling, transfer reservation over to normal class.
* The io_allocator slot can remain the same even though we
* are switching classes.
*/
if (mc->mc_alloc_throttle_enabled &&
(zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
metaslab_class_throttle_unreserve(mc,
zio->io_prop.zp_copies, zio->io_allocator, zio);
zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
VERIFY(metaslab_class_throttle_reserve(
spa_normal_class(spa),
zio->io_prop.zp_copies, zio->io_allocator, zio,
flags | METASLAB_MUST_RESERVE));
}
zio->io_metaslab_class = mc = spa_normal_class(spa);
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
}
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
return (zio_write_gang_block(zio, mc));
}
if (error != 0) {
if (error != ENOSPC ||
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
zio->io_error = error;
}
return (zio);
}
static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (zio);
}
static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (zio);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp))
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
/*
* Block pointer fields are useful to metaslabs for stats and debugging.
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_PSIZE(new_bp, size);
BP_SET_LEVEL(new_bp, 0);
/*
* When allocating a zil block, we don't have information about
* the final destination of the block except the objset it's part
* of, so we just hash the objset ID to pick the allocator to get
* some parallelism.
*/
- int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
+ int flags = METASLAB_ZIL;
int allocator = (uint_t)cityhash4(0, 0, 0,
os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
if (error != 0) {
error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
if (error != 0) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* Encrypted blocks require an IV and salt. We generate
* these now since the bp will not be regenerated when the
* block is later rewritten.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), (u_longlong_t)size,
error);
}
return (error);
}
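/*
 * Illustrative sketch (hypothetical types, not part of this file): the
 * allocation above walks a fixed class order -- dedicated log, then
 * embedded log, then normal -- and stops at the first class with
 * space, keeping ZIL blocks off the normal class whenever possible:
 */
typedef int (*alloc_try_fn_t)(void *arg);

static int
alloc_first_fit_sketch(alloc_try_fn_t *fns, void **args, int n)
{
	int error = -1;		/* no class tried yet */

	for (int i = 0; i < n && error != 0; i++)
		error = fns[i](args[i]);	/* 0 on success */
	return (error);
}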
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different than the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(spa->spa_trust_config);
/*
* Note: the code can handle other kinds of writes,
* but we don't expect them.
*/
if (zio->io_vd->vdev_noalloc) {
ASSERT(zio->io_flags &
(ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
}
}
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering.
*
* There are a few ways that we can end up creating these spurious
* resilver i/os:
*
* 1. A resilver i/o will be issued if any DVA in the BP has a
* dirty DTL. The mirror code will issue resilver writes to
* each DVA, including the one(s) that are not on vdevs with dirty
* DTLs.
*
* 2. With nested replication, which happens when we have a
* "replacing" or "spare" vdev that's a child of a mirror or raidz.
* For example, given mirror(replacing(A+B), C), it's likely that
* only A is out of date (it's the new device). In this case, we'll
* read from C, then use the data to resilver A+B -- but we don't
* actually want to resilver B, just A. The top-level mirror has no
* way to know this, so instead we just discard unnecessary repairs
* as we work our way down the vdev tree.
*
* 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
* The same logic applies to any form of nested replication: ditto
* + mirror, RAID-Z + replacing, etc.
*
* However, indirect vdevs point off to other vdevs which may have
* DTL's, so we never bypass them. The child i/os on concrete vdevs
* will be properly bypassed instead.
*
* Leaf DTL_PARTIAL can be empty when a legitimate write comes from
* a dRAID spare vdev. For example, when a dRAID spare is first
* used, its spare blocks need to be written to, but the leaf vdevs
* of such blocks can have an empty DTL_PARTIAL.
*
* There seemed no clean way to allow such writes while bypassing
* spurious ones. At this point, just avoid all bypassing for dRAID
* for correctness.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
vd->vdev_ops != &vdev_indirect_ops &&
vd->vdev_top->vdev_ops != &vdev_draid_ops &&
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (zio);
}
/*
* Select the next best leaf I/O to process. Distributed spares are
* excluded since they dispatch the I/O directly to a leaf vdev after
* applying the dRAID mapping.
*/
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops &&
(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM)) {
if ((zio = vdev_queue_io(zio)) == NULL)
return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (NULL);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
return (NULL);
}
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
vdev_queue_io_done(zio);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
VERIFY(vdev_probe(vd, zio) == NULL);
return (zio);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (NULL);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
"cant_write=TRUE due to write failure with ENXIO",
zio);
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
* attempts will ever succeed. In this case we set a persistent
* boolean flag so that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&
zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
vd->vdev_nowritecache = B_TRUE;
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
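/*
* A note on the stage arithmetic above: pipeline stages are one-hot bits,
* and zio_execute() advances a zio to the next stage bit set in
* io_pipeline above io_stage. Shifting io_stage right by one therefore
* rewinds the zio to "just before" the named stage, making that stage the
* next one to run.
*/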
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Objset blocks are a special case since they have 2 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (zio);
}
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (zio);
}
static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
}
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, &info);
}
}
return (zio);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
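/*
* For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, since
* ECKSUM (rank 2) outranks ENXIO (rank 1). An errno that is missing from
* the table, e.g. EINVAL, falls off the end of its loop with the maximum
* rank and is therefore always treated as the worst.
*/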
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
- if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
- ZIO_WAIT_READY)) {
+ if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
+ ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
#ifdef ZFS_DEBUG
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
#endif
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_metaslab_class != NULL);
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
zio->io_metaslab_class, zio->io_prop.zp_copies,
zio->io_allocator, zio);
zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (bp != NULL && BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (zio);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
zio_t *lio __maybe_unused = zio->io_logical;
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
static zio_t *
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (NULL);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (adata != NULL && asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (adata != NULL && asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, exceeding
* 30 seconds (zio_slow_io_ms) to complete, post an error describing the
* I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
/*
* We want to only increment our slow IO counters if
* the IO is valid (i.e. not if the drive is removed).
*
* zfs_ereport_post() will also do these checks, but
* it can also ratelimit and have other failures, so we
* need to increment the slow_io counters independent
* of it.
*/
if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, zio)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_slow_ios++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, &zio->io_bookmark,
zio, 0);
}
}
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd)) {
int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
if (ret != EALREADY) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_READ)
zio->io_vd->vdev_stat.vs_read_errors++;
else if (zio->io_type == ZIO_TYPE_WRITE)
zio->io_vd->vdev_stat.vs_write_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
/*
* This is a rare code path, so we don't
* bother with "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
NULL);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
/*
* This is a rare code path, so we don't bother with
* "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
zio_reexecute, zio, 0, &zio->io_tqent);
}
return (NULL);
}
ASSERT(list_is_empty(&zio->io_child_list));
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
- if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
- !BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
- !(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
- metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
- }
-
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* We are done executing this zio. We may want to execute a parent
* next. See the comment in zio_notify_parent().
*/
zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (next_to_execute);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_brt_free,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (any value larger than a level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level, but different blkids, if the block sizes are not the same.
* There is presently no way to change the indirect block sizes.
*/
return (0);
}
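/*
* A worked example, assuming 512-byte dnodes (DNODE_SHIFT == 9) and a
* 128K meta-dnode data block (dbss == 256 sectors): a level-0 meta-dnode
* bookmark with blkid 4 has L0equiv 4 and canonicalizes to object
* equivalent 4 * 256 = 1024, L0equiv 0, and level 0 + COMPARE_META_LEVEL,
* so it sorts just before the bookmarks of object 1024, the first of the
* 256 dnodes stored in that block.
*/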
/*
* This function checks the following: given that last_block is the place that
* our traversal stopped last time, does that guarantee that we've visited
* every node under subtree_root? Therefore, we can't just use the raw output
* of zbookmark_compare. We have to pass in a modified version of
* subtree_root; by incrementing the block id, and then checking whether
* last_block is before or equal to that, we can tell whether or not having
* visited last_block implies that all of subtree_root's children have been
* visited.
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT0(last_block->zb_level);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you make changes to how the zbookmark_compare code works, make sure
* that this code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
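/*
* For example, with 128K indirect blocks holding 1024 block pointers
* each, subtree_root = {level 1, blkid 3} spans L0 blkids 3072..4095.
* Incrementing gives {level 1, blkid 4}, whose first L0 blkid is 4096;
* if last_block (always level 0) is at or past blkid 4096, every block
* under the original subtree_root must already have been visited.
*/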
/*
* This function is similar to zbookmark_subtree_completed(), but returns true
* if subtree_root is equal to or ahead of last_block, i.e. still to be done.
*/
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
ASSERT0(last_block->zb_level);
if (dnp == NULL)
return (B_FALSE);
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
last_block) >= 0);
}
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
"Max I/O completion time (milliseconds) before marking it as slow");
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
"Prioritize requeued I/O");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
"Defer frees starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
"Don't compress starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
"Rewrite new bps starting in this pass");
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
"Throttle block allocations in the ZIO pipeline");
ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
"Log all slow ZIOs, not just those with vdevs");
diff --git a/sys/contrib/openzfs/module/zfs/zio_checksum.c b/sys/contrib/openzfs/module/zfs/zio_checksum.c
index 6090959c5b8c..9de515e8767a 100644
--- a/sys/contrib/openzfs/module/zfs/zio_checksum.c
+++ b/sys/contrib/openzfs/module/zfs/zio_checksum.c
@@ -1,575 +1,573 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 by Delphix. All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zil.h>
#include <sys/abd.h>
#include <zfs_fletcher.h>
/*
* Checksum vectors.
*
* In the SPA, everything is checksummed. We support checksum vectors
* for three distinct reasons:
*
* 1. Different kinds of data need different levels of protection.
* For SPA metadata, we always want a very strong checksum.
* For user data, we let users make the trade-off between speed
* and checksum strength.
*
* 2. Cryptographic hash and MAC algorithms are an area of active research.
* It is likely that future hash functions will be at least as strong
* as current best-of-breed, and may be substantially faster as well.
* We want the ability to take advantage of these new hashes as soon as
* they become available.
*
* 3. If someone develops hardware that can compute a strong hash quickly,
* we want the ability to take advantage of that hardware.
*
* Of course, we don't want a checksum upgrade to invalidate existing
* data, so we store the checksum *function* in eight bits of the bp.
* This gives us room for up to 256 different checksum functions.
*
* When writing a block, we always checksum it with the latest-and-greatest
* checksum function of the appropriate strength. When reading a block,
* we compare the expected checksum against the actual checksum, which we
* compute via the checksum function specified by BP_GET_CHECKSUM(bp).
*
* SALTED CHECKSUMS
*
* To enable the use of less secure hash algorithms with dedup, we
* introduce the notion of salted checksums (MACs, really). A salted
* checksum is fed both a random 256-bit value (the salt) and the data
* to be checksummed. This salt is kept secret (stored on the pool, but
* never shown to the user). Thus even if an attacker knew of collision
* weaknesses in the hash algorithm, they would not be able to mount a known
* plaintext attack on the DDT, since the actual hash value cannot be
* known ahead of time. How the salt is used is algorithm-specific
* (some might simply prefix it to the data block, others might need to
* utilize a full-blown HMAC). On disk the salt is stored in a ZAP
* object in the MOS (DMU_POOL_CHECKSUM_SALT).
*
* CONTEXT TEMPLATES
*
* Some hashing algorithms need to perform a substantial amount of
* initialization work (e.g. salted checksums above may need to pre-hash
* the salt) before being able to process data. Performing this
* redundant work for each block would be wasteful, so we instead allow
* a checksum algorithm to do the work once (the first time it's used)
* and then keep this pre-initialized context as a template inside the
* spa_t (spa_cksum_tmpls). If the zio_checksum_info_t contains
* non-NULL ci_tmpl_init and ci_tmpl_free callbacks, they are used to
* construct and destruct the pre-initialized checksum context. The
* pre-initialized context is then reused during each checksum
* invocation and passed to the checksum function.
*/
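/*
* A minimal sketch of the template lifecycle described above, in terms of
* the callbacks used below (zio_checksum_template_init() installs the
* template, zio_checksum_templates_free() tears it down):
*
* tmpl = ci->ci_tmpl_init(&spa->spa_cksum_salt); (once per spa)
* ci->ci_func[byteswap](abd, size, tmpl, &cksum); (per block)
* ci->ci_tmpl_free(tmpl); (at spa deallocation)
*/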
static void
abd_checksum_off(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) abd, (void) size, (void) ctx_template;
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
static void
abd_fletcher_2_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_native, zcp);
}
static void
abd_fletcher_2_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_byteswap, zcp);
}
static inline void
abd_fletcher_4_impl(abd_t *abd, uint64_t size, zio_abd_checksum_data_t *acdp)
{
fletcher_4_abd_ops.acf_init(acdp);
abd_iterate_func(abd, 0, size, fletcher_4_abd_ops.acf_iter, acdp);
fletcher_4_abd_ops.acf_fini(acdp);
}
void
abd_fletcher_4_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
.acd_byteorder = ZIO_CHECKSUM_NATIVE,
.acd_zcp = zcp,
.acd_ctx = &ctx
};
abd_fletcher_4_impl(abd, size, &acd);
}
void
abd_fletcher_4_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
.acd_byteorder = ZIO_CHECKSUM_BYTESWAP,
.acd_zcp = zcp,
.acd_ctx = &ctx
};
abd_fletcher_4_impl(abd, size, &acd);
}
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
{{NULL, NULL}, NULL, NULL, 0, "inherit"},
{{NULL, NULL}, NULL, NULL, 0, "on"},
{{abd_checksum_off, abd_checksum_off},
NULL, NULL, 0, "off"},
{{abd_checksum_sha256, abd_checksum_sha256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
"label"},
{{abd_checksum_sha256, abd_checksum_sha256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
"gang_header"},
{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog"},
{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
NULL, NULL, 0, "fletcher2"},
{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_METADATA, "fletcher4"},
{{abd_checksum_sha256, abd_checksum_sha256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_NOPWRITE, "sha256"},
{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog2"},
{{abd_checksum_off, abd_checksum_off},
NULL, NULL, 0, "noparity"},
{{abd_checksum_sha512_native, abd_checksum_sha512_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_NOPWRITE, "sha512"},
{{abd_checksum_skein_native, abd_checksum_skein_byteswap},
abd_checksum_skein_tmpl_init, abd_checksum_skein_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "skein"},
{{abd_checksum_edonr_native, abd_checksum_edonr_byteswap},
abd_checksum_edonr_tmpl_init, abd_checksum_edonr_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_SALTED |
ZCHECKSUM_FLAG_NOPWRITE, "edonr"},
{{abd_checksum_blake3_native, abd_checksum_blake3_byteswap},
abd_checksum_blake3_tmpl_init, abd_checksum_blake3_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "blake3"},
};
/*
* The flag corresponding to the "verify" in dedup=[checksum,]verify
* must be cleared first, so callers should use ZIO_CHECKSUM_MASK.
*/
spa_feature_t
zio_checksum_to_feature(enum zio_checksum cksum)
{
VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);
switch (cksum) {
case ZIO_CHECKSUM_BLAKE3:
return (SPA_FEATURE_BLAKE3);
case ZIO_CHECKSUM_SHA512:
return (SPA_FEATURE_SHA512);
case ZIO_CHECKSUM_SKEIN:
return (SPA_FEATURE_SKEIN);
case ZIO_CHECKSUM_EDONR:
return (SPA_FEATURE_EDONR);
default:
return (SPA_FEATURE_NONE);
}
}
enum zio_checksum
zio_checksum_select(enum zio_checksum child, enum zio_checksum parent)
{
ASSERT(child < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);
if (child == ZIO_CHECKSUM_INHERIT)
return (parent);
if (child == ZIO_CHECKSUM_ON)
return (ZIO_CHECKSUM_ON_VALUE);
return (child);
}
enum zio_checksum
zio_checksum_dedup_select(spa_t *spa, enum zio_checksum child,
enum zio_checksum parent)
{
ASSERT((child & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
ASSERT((parent & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);
if (child == ZIO_CHECKSUM_INHERIT)
return (parent);
if (child == ZIO_CHECKSUM_ON)
return (spa_dedup_checksum(spa));
if (child == (ZIO_CHECKSUM_ON | ZIO_CHECKSUM_VERIFY))
return (spa_dedup_checksum(spa) | ZIO_CHECKSUM_VERIFY);
ASSERT((zio_checksum_table[child & ZIO_CHECKSUM_MASK].ci_flags &
ZCHECKSUM_FLAG_DEDUP) ||
(child & ZIO_CHECKSUM_VERIFY) || child == ZIO_CHECKSUM_OFF);
return (child);
}
/*
* Set the external verifier for a gang block based on <vdev, offset, txg>,
* a tuple which is guaranteed to be unique for the life of the pool.
*/
static void
zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t txg = BP_PHYSICAL_BIRTH(bp);
ASSERT(BP_IS_GANG(bp));
ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
}
/*
* Set the external verifier for a label block based on its offset.
* The vdev is implicit, and the txg is unknowable at pool open time --
* hence the logic in vdev_uberblock_load() to find the most recent copy.
*/
static void
zio_checksum_label_verifier(zio_cksum_t *zcp, uint64_t offset)
{
ZIO_SET_CHECKSUM(zcp, offset, 0, 0, 0);
}
/*
* Calls the template init function of a checksum which supports context
* templates and installs the template into the spa_t.
*/
static void
zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
if (ci->ci_tmpl_init == NULL)
return;
if (spa->spa_cksum_tmpls[checksum] != NULL)
return;
VERIFY(ci->ci_tmpl_free != NULL);
mutex_enter(&spa->spa_cksum_tmpls_lock);
if (spa->spa_cksum_tmpls[checksum] == NULL) {
spa->spa_cksum_tmpls[checksum] =
ci->ci_tmpl_init(&spa->spa_cksum_salt);
VERIFY(spa->spa_cksum_tmpls[checksum] != NULL);
}
mutex_exit(&spa->spa_cksum_tmpls_lock);
}
/* convenience function to update a checksum to accommodate an encryption MAC */
static void
zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor)
{
/*
* Weak checksums do not have their entropy spread evenly
* across the bits of the checksum. Therefore, when truncating
* a weak checksum we XOR the first 2 words with the last 2 so
* that we don't "lose" any entropy unnecessarily.
*/
if (xor) {
cksum->zc_word[0] ^= cksum->zc_word[2];
cksum->zc_word[1] ^= cksum->zc_word[3];
}
cksum->zc_word[2] = saved->zc_word[2];
cksum->zc_word[3] = saved->zc_word[3];
}
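/*
* For example, given a freshly computed checksum {w0, w1, w2, w3} and a
* saved MAC in words 2 and 3, a weak (non-dedup) checksum is stored as
* {w0 ^ w2, w1 ^ w3, mac2, mac3}, while a dedup-strength checksum keeps
* its first two words intact: {w0, w1, mac2, mac3}.
*/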
/*
* Generate the checksum.
*/
void
zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
abd_t *abd, uint64_t size)
{
static const uint64_t zec_magic = ZEC_MAGIC;
blkptr_t *bp = zio->io_bp;
uint64_t offset = zio->io_offset;
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_cksum_t cksum, saved;
spa_t *spa = zio->io_spa;
boolean_t insecure = (ci->ci_flags & ZCHECKSUM_FLAG_DEDUP) == 0;
ASSERT((uint_t)checksum < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(ci->ci_func[0] != NULL);
zio_checksum_template_init(checksum, spa);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
zio_eck_t eck;
size_t eck_offset;
memset(&saved, 0, sizeof (zio_cksum_t));
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;
abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
size = P2ROUNDUP_TYPED(zilc.zc_nused, ZIL_MIN_BLKSZ,
uint64_t);
eck = zilc.zc_eck;
eck_offset = offsetof(zil_chain_t, zc_eck);
} else {
eck_offset = size - sizeof (zio_eck_t);
abd_copy_to_buf_off(&eck, abd, eck_offset,
sizeof (zio_eck_t));
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER) {
zio_checksum_gang_verifier(&eck.zec_cksum, bp);
} else if (checksum == ZIO_CHECKSUM_LABEL) {
zio_checksum_label_verifier(&eck.zec_cksum, offset);
} else {
saved = eck.zec_cksum;
eck.zec_cksum = bp->blk_cksum;
}
abd_copy_from_buf_off(abd, &zec_magic,
eck_offset + offsetof(zio_eck_t, zec_magic),
sizeof (zec_magic));
abd_copy_from_buf_off(abd, &eck.zec_cksum,
eck_offset + offsetof(zio_eck_t, zec_cksum),
sizeof (zio_cksum_t));
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
if (bp != NULL && BP_USES_CRYPT(bp) &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET)
zio_checksum_handle_crypt(&cksum, &saved, insecure);
abd_copy_from_buf_off(abd, &cksum,
eck_offset + offsetof(zio_eck_t, zec_cksum),
sizeof (zio_cksum_t));
} else {
saved = bp->blk_cksum;
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
if (BP_USES_CRYPT(bp) && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
zio_checksum_handle_crypt(&cksum, &saved, insecure);
bp->blk_cksum = cksum;
}
}
int
zio_checksum_error_impl(spa_t *spa, const blkptr_t *bp,
enum zio_checksum checksum, abd_t *abd, uint64_t size, uint64_t offset,
zio_bad_cksum_t *info)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_cksum_t actual_cksum, expected_cksum;
zio_eck_t eck;
int byteswap;
if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
return (SET_ERROR(EINVAL));
zio_checksum_template_init(checksum, spa);
IMPLY(bp == NULL, ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED);
IMPLY(bp == NULL, checksum == ZIO_CHECKSUM_LABEL);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
zio_cksum_t verifier;
size_t eck_offset;
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;
uint64_t nused;
abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
eck = zilc.zc_eck;
eck_offset = offsetof(zil_chain_t, zc_eck) +
offsetof(zio_eck_t, zec_cksum);
if (eck.zec_magic == ZEC_MAGIC) {
nused = zilc.zc_nused;
} else if (eck.zec_magic == BSWAP_64(ZEC_MAGIC)) {
nused = BSWAP_64(zilc.zc_nused);
} else {
return (SET_ERROR(ECKSUM));
}
if (nused > size) {
return (SET_ERROR(ECKSUM));
}
size = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
} else {
eck_offset = size - sizeof (zio_eck_t);
abd_copy_to_buf_off(&eck, abd, eck_offset,
sizeof (zio_eck_t));
eck_offset += offsetof(zio_eck_t, zec_cksum);
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER)
zio_checksum_gang_verifier(&verifier, bp);
else if (checksum == ZIO_CHECKSUM_LABEL)
zio_checksum_label_verifier(&verifier, offset);
else
verifier = bp->blk_cksum;
byteswap = (eck.zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck.zec_cksum;
abd_copy_from_buf_off(abd, &verifier, eck_offset,
sizeof (zio_cksum_t));
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
abd_copy_from_buf_off(abd, &expected_cksum, eck_offset,
sizeof (zio_cksum_t));
if (byteswap) {
byteswap_uint64_array(&expected_cksum,
sizeof (zio_cksum_t));
}
} else {
byteswap = BP_SHOULD_BYTESWAP(bp);
expected_cksum = bp->blk_cksum;
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
}
/*
* MAC checksums are a special case since half of this checksum will
* actually be the encryption MAC. This will be verified by the
* decryption process, so we just check the truncated checksum now.
* Objset blocks use embedded MACs so we don't truncate the checksum
* for them.
*/
if (bp != NULL && BP_USES_CRYPT(bp) &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET) {
if (!(ci->ci_flags & ZCHECKSUM_FLAG_DEDUP)) {
actual_cksum.zc_word[0] ^= actual_cksum.zc_word[2];
actual_cksum.zc_word[1] ^= actual_cksum.zc_word[3];
}
actual_cksum.zc_word[2] = 0;
actual_cksum.zc_word[3] = 0;
expected_cksum.zc_word[2] = 0;
expected_cksum.zc_word[3] = 0;
}
if (info != NULL) {
- info->zbc_expected = expected_cksum;
- info->zbc_actual = actual_cksum;
info->zbc_checksum_name = ci->ci_name;
info->zbc_byteswapped = byteswap;
info->zbc_injected = 0;
info->zbc_has_cksum = 1;
}
if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (SET_ERROR(ECKSUM));
return (0);
}
int
zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
{
blkptr_t *bp = zio->io_bp;
uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
int error;
uint64_t size = (bp == NULL ? zio->io_size :
(BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
uint64_t offset = zio->io_offset;
abd_t *data = zio->io_abd;
spa_t *spa = zio->io_spa;
error = zio_checksum_error_impl(spa, bp, checksum, data, size,
offset, info);
if (zio_injection_enabled && error == 0 && zio->io_error == 0) {
error = zio_handle_fault_injection(zio, ECKSUM);
if (error != 0)
info->zbc_injected = 1;
}
return (error);
}
/*
* Called by a spa_t that's about to be deallocated. This steps through
* all of the checksum context templates and deallocates any that were
* initialized using the algorithm-specific template init function.
*/
void
zio_checksum_templates_free(spa_t *spa)
{
for (enum zio_checksum checksum = 0;
checksum < ZIO_CHECKSUM_FUNCTIONS; checksum++) {
if (spa->spa_cksum_tmpls[checksum] != NULL) {
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
VERIFY(ci->ci_tmpl_free != NULL);
ci->ci_tmpl_free(spa->spa_cksum_tmpls[checksum]);
spa->spa_cksum_tmpls[checksum] = NULL;
}
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index cd4e6f0c7558..53dcb4dee448 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -1,1791 +1,1791 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot and module load. No user command
* needs to be run before opening and using a device.
*
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
/*
* Note on locking of zvol state structures.
*
* These structures are used to maintain internal state used to emulate block
* devices on top of zvols. In particular, management of device minor number
* operations - create, remove, rename, and set_snapdev - involves access to
* these structures. The zvol_state_lock is primarily used to protect the
* zvol_state_list. The zv->zv_state_lock is used to protect the contents
* of the zvol_state_t structures, as well as to make sure that when the
* time comes to remove the structure from the list, it is not in use, and
* therefore, it can be taken off zvol_state_list and freed.
*
* The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
* e.g. for the duration of receive and rollback operations. This lock can be
* held for significant periods of time. Given that it is undesirable to hold
* mutexes for long periods of time, the following lock ordering applies:
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
*
* The minor operations are issued to spa->spa_zvol_taskq queues, which are
* single-threaded (to preserve order of minor operations), and are executed
* through the zvol_task_cb that dispatches the specific operations. Therefore,
* these operations are serialized per pool. Consequently, we can be certain
* that for a given zvol, there is only one operation at a time in progress.
* That is why one can be sure that first, zvol_state_t for a given zvol is
* allocated and placed on zvol_state_list, and then other minor operations
* for this zvol are going to proceed in the order of issue.
*
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;
struct hlist_head *zvol_htable;
static list_t zvol_state_list;
krwlock_t zvol_state_lock;
typedef enum {
ZVOL_ASYNC_REMOVE_MINORS,
ZVOL_ASYNC_RENAME_MINORS,
ZVOL_ASYNC_SET_SNAPDEV,
ZVOL_ASYNC_SET_VOLMODE,
ZVOL_ASYNC_MAX
} zvol_async_op_t;
typedef struct {
zvol_async_op_t op;
char name1[MAXNAMELEN];
char name2[MAXNAMELEN];
uint64_t value;
} zvol_task_t;
uint64_t
zvol_name_hash(const char *name)
{
int i;
uint64_t crc = -1ULL;
const uint8_t *p = (const uint8_t *)name;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
}
return (crc);
}
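/*
* Typical usage, as in zvol_find_by_name() below: hash the name once and
* pass both to the lookup so that only a single hash bucket is walked,
* e.g.
*
* zv = zvol_find_by_name_hash(name, zvol_name_hash(name), RW_NONE);
*/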
/*
* Find a zvol_state_t given the name and hash generated by zvol_name_hash.
* If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
* return (NULL) without taking the locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (including none)
* for zv_suspend_lock to be taken.
*/
zvol_state_t *
zvol_find_by_name_hash(const char *name, uint64_t hash, int mode)
{
zvol_state_t *zv;
struct hlist_node *p = NULL;
rw_enter(&zvol_state_lock, RW_READER);
hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
zv = hlist_entry(p, zvol_state_t, zv_hlink);
mutex_enter(&zv->zv_state_lock);
if (zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN) == 0) {
/*
* this is the right zvol, take the locks in the
* right order
*/
if (mode != RW_NONE &&
!rw_tryenter(&zv->zv_suspend_lock, mode)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, mode);
mutex_enter(&zv->zv_state_lock);
/*
* zvol cannot be renamed as we continue
* to hold zvol_state_lock
*/
ASSERT(zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN)
== 0);
}
rw_exit(&zvol_state_lock);
return (zv);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (NULL);
}
/*
* Find a zvol_state_t given the name.
* If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
* return (NULL) without taking the locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (including none)
* for zv_suspend_lock to be taken.
*/
static zvol_state_t *
zvol_find_by_name(const char *name, int mode)
{
return (zvol_find_by_name_hash(name, zvol_name_hash(name), mode));
}
/*
* ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
*/
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
zfs_creat_t *zct = arg;
nvlist_t *nvprops = zct->zct_props;
int error;
uint64_t volblocksize, volsize;
VERIFY(nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
/*
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
VERIFY(nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
ASSERT(error == 0);
}
/*
* ZFS_IOC_OBJSET_STATS entry point.
*/
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
int error;
dmu_object_info_t *doi;
uint64_t val;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
if (error)
return (SET_ERROR(error));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
doi->doi_data_block_size);
}
kmem_free(doi, sizeof (dmu_object_info_t));
return (SET_ERROR(error));
}
/*
* Sanity check volume size.
*/
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
if (volsize == 0)
return (SET_ERROR(EINVAL));
if (volsize % blocksize != 0)
return (SET_ERROR(EINVAL));
#ifdef _ILP32
if (volsize - 1 > SPEC_MAXOFFSET_T)
return (SET_ERROR(EOVERFLOW));
#endif
return (0);
}
/*
* Ensure the zap is flushed, then inform the VFS of the capacity change.
*/
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
dmu_tx_t *tx;
int error;
uint64_t txg;
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (SET_ERROR(error));
}
txg = dmu_tx_get_txg(tx);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
&volsize, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
if (error == 0)
error = dmu_free_long_range(os,
ZVOL_OBJ, volsize, DMU_OBJECT_END);
return (error);
}
/*
* ZFS_PROP_VOLSIZE set entry point. Note that modifying the volume
* size will result in a udev "change" event being generated.
*/
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
objset_t *os = NULL;
uint64_t readonly;
int error;
boolean_t owned = B_FALSE;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
if (error != 0)
return (SET_ERROR(error));
if (readonly)
return (SET_ERROR(EROFS));
zvol_state_t *zv = zvol_find_by_name(name, RW_READER);
ASSERT(zv == NULL || (MUTEX_HELD(&zv->zv_state_lock) &&
RW_READ_HELD(&zv->zv_suspend_lock)));
if (zv == NULL || zv->zv_objset == NULL) {
if (zv != NULL)
rw_exit(&zv->zv_suspend_lock);
if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
FTAG, &os)) != 0) {
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(error));
}
owned = B_TRUE;
if (zv != NULL)
zv->zv_objset = os;
} else {
os = zv->zv_objset;
}
dmu_object_info_t *doi = kmem_alloc(sizeof (*doi), KM_SLEEP);
if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
(error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
goto out;
error = zvol_update_volsize(volsize, os);
if (error == 0 && zv != NULL) {
zv->zv_volsize = volsize;
zv->zv_changed = 1;
}
out:
kmem_free(doi, sizeof (dmu_object_info_t));
if (owned) {
dmu_objset_disown(os, B_TRUE, FTAG);
if (zv != NULL)
zv->zv_objset = NULL;
} else {
rw_exit(&zv->zv_suspend_lock);
}
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
if (error == 0 && zv != NULL)
zvol_os_update_volsize(zv, volsize);
return (SET_ERROR(error));
}
/*
* Sanity check volume block size.
*/
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
/* Record sizes above 128k need the feature to be enabled */
if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
spa_t *spa;
int error;
if ((error = spa_open(name, &spa, FTAG)) != 0)
return (error);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
/*
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
if (volblocksize > zfs_max_recordsize)
return (SET_ERROR(EDOM));
spa_close(spa, FTAG);
}
if (volblocksize < SPA_MINBLOCKSIZE ||
volblocksize > SPA_MAXBLOCKSIZE ||
!ISP2(volblocksize))
return (SET_ERROR(EDOM));
return (0);
}
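/*
 * For example, assuming the default zfs_max_recordsize of 1 MiB noted in
 * the comment above: 4096, 16384 and 131072 are accepted unconditionally;
 * 262144 requires the large_blocks pool feature; 2097152 additionally
 * requires raising the tunable; and 3000 always fails with EDOM since it
 * is not a power of two.
 */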
/*
* Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
* implement DKIOCFREE/free-long-range.
*/
static int
zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_truncate_t *lr = arg2;
uint64_t offset, length;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
int error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset,
length);
}
return (error);
}
/*
* Replay a TX_WRITE ZIL transaction that didn't get committed
* after a system failure.
*/
static int
zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_write_t *lr = arg2;
objset_t *os = zv->zv_objset;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
uint64_t offset, length;
dmu_tx_t *tx;
int error;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
}
return (error);
}
/*
* Replay a TX_CLONE_RANGE ZIL transaction that didn't get committed
* after a system failure.
*
* TODO: For now we drop block cloning transactions for ZVOLs as they are
* unsupported, but we still need to inform the BRT about that, as we
* claimed them during pool import.
* This situation can occur when we try to import a pool created by a ZFS
* version that supports block cloning for ZVOLs into a system running
* this ZFS version, which does not support block cloning for ZVOLs.
*/
static int
zvol_replay_clone_range(void *arg1, void *arg2, boolean_t byteswap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zvol_state_t *zv = arg1;
objset_t *os = zv->zv_objset;
lr_clone_range_t *lr = arg2;
blkptr_t *bp;
dmu_tx_t *tx;
spa_t *spa;
uint_t ii;
int error;
dmu_objset_name(os, name);
cmn_err(CE_WARN, "ZFS dropping block cloning transaction for %s.",
name);
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
tx = dmu_tx_create(os);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
spa = os->os_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
if (!BP_IS_HOLE(bp)) {
zio_free(spa, dmu_tx_get_txg(tx), bp);
}
}
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
return (0);
}
static int
zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
{
(void) arg1, (void) arg2, (void) byteswap;
return (SET_ERROR(ENOTSUP));
}
/*
* Callback vectors for replaying records.
* Only TX_WRITE and TX_TRUNCATE are needed for zvol.
*/
zil_replay_func_t *const zvol_replay_vector[TX_MAX_TYPE] = {
zvol_replay_err, /* no such transaction type */
zvol_replay_err, /* TX_CREATE */
zvol_replay_err, /* TX_MKDIR */
zvol_replay_err, /* TX_MKXATTR */
zvol_replay_err, /* TX_SYMLINK */
zvol_replay_err, /* TX_REMOVE */
zvol_replay_err, /* TX_RMDIR */
zvol_replay_err, /* TX_LINK */
zvol_replay_err, /* TX_RENAME */
zvol_replay_write, /* TX_WRITE */
zvol_replay_truncate, /* TX_TRUNCATE */
zvol_replay_err, /* TX_SETATTR */
zvol_replay_err, /* TX_ACL */
zvol_replay_err, /* TX_CREATE_ATTR */
zvol_replay_err, /* TX_CREATE_ACL_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL */
zvol_replay_err, /* TX_MKDIR_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
zvol_replay_err, /* TX_WRITE2 */
zvol_replay_err, /* TX_SETSAXATTR */
zvol_replay_err, /* TX_RENAME_EXCHANGE */
zvol_replay_err, /* TX_RENAME_WHITEOUT */
zvol_replay_clone_range /* TX_CLONE_RANGE */
};
/*
* zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
*
* We store data in the log buffers if it's small enough.
* Otherwise we will later flush the data out via dmu_sync().
*/
static const ssize_t zvol_immediate_write_sz = 32768;
void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
uint64_t size, int sync)
{
uint32_t blocksize = zv->zv_volblocksize;
zilog_t *zilog = zv->zv_zilog;
itx_wr_state_t write_state;
uint64_t sz = size;
if (zil_replaying(zilog, tx))
return;
if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
write_state = WR_INDIRECT;
else if (!spa_has_slogs(zilog->zl_spa) &&
size >= blocksize && blocksize > zvol_immediate_write_sz)
write_state = WR_INDIRECT;
else if (sync)
write_state = WR_COPIED;
else
write_state = WR_NEED_COPY;
while (size) {
itx_t *itx;
lr_write_t *lr;
itx_wr_state_t wr_state = write_state;
ssize_t len = size;
if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
wr_state = WR_NEED_COPY;
else if (wr_state == WR_INDIRECT)
len = MIN(blocksize - P2PHASE(offset, blocksize), size);
itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
(wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
}
itx->itx_wr_state = wr_state;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = offset;
lr->lr_length = len;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
itx->itx_private = zv;
itx->itx_sync = sync;
(void) zil_itx_assign(zilog, itx, tx);
offset += len;
size -= len;
}
if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
dsl_pool_wrlog_count(zilog->zl_dmu_pool, sz, tx->tx_txg);
}
}
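/*
 * Illustrative summary (not part of this change) of how the initial
 * write state is chosen above:
 *
 * logbias=throughput -> WR_INDIRECT
 * no slog, write size >= volblocksize > 32K -> WR_INDIRECT
 * sync write -> WR_COPIED
 * async write -> WR_NEED_COPY
 *
 * A WR_COPIED chunk still degrades to WR_NEED_COPY when it exceeds
 * zil_max_copied_data() or when the copy from the DMU fails.
 */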
/*
* Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
*/
void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
boolean_t sync)
{
itx_t *itx;
lr_truncate_t *lr;
zilog_t *zilog = zv->zv_zilog;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
lr = (lr_truncate_t *)&itx->itx_lr;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = off;
lr->lr_length = len;
itx->itx_sync = sync;
zil_itx_assign(zilog, itx, tx);
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
(void) error;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
kmem_free(zgd, sizeof (zgd_t));
}
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zvol_state_t *zv = arg;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
DMU_READ_NO_PREFETCH);
} else { /* indirect write */
+ ASSERT3P(zio, !=, NULL);
/*
* We have to lock the whole block to ensure that no one can change
* the data while it is being written out and its checksum is being
* calculated. Unlike zfs_get_data(), we need not re-check the
* blocksize after we take the lock, because it cannot be changed.
*/
size = zv->zv_volblocksize;
offset = P2ALIGN_TYPED(offset, size, uint64_t);
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
- error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
- DMU_READ_NO_PREFETCH);
+ error = dmu_buf_hold_noread_by_dnode(zv->zv_dn, offset, zgd,
+ &db);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db != NULL);
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zvol_get_done, zgd);
if (error == 0)
return (0);
}
}
zvol_get_done(zgd, error);
return (SET_ERROR(error));
}
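/*
 * Worked example for the indirect path above: an offset of 5000 with an
 * 8 KiB volblocksize is aligned down to 0, so the range lock covers the
 * whole block [0, 8192), because dmu_sync() checksums the entire block.
 */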
/*
* The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
*/
void
zvol_insert(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_insert_head(&zvol_state_list, zv);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
}
/*
* Simply remove the zvol from the list of zvols.
*/
static void
zvol_remove(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_remove(&zvol_state_list, zv);
hlist_del(&zv->zv_hlink);
}
/*
* Set up zv after we have just taken ownership of zv->zv_objset.
*/
static int
zvol_setup_zv(zvol_state_t *zv)
{
uint64_t volsize;
int error;
uint64_t ro;
objset_t *os = zv->zv_objset;
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_LOCK_HELD(&zv->zv_suspend_lock));
zv->zv_zilog = NULL;
zv->zv_flags &= ~ZVOL_WRITTEN_TO;
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
return (SET_ERROR(error));
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
return (SET_ERROR(error));
error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
if (error)
return (SET_ERROR(error));
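/* The block layer expresses capacity in 512-byte sectors, hence >> 9. */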
zvol_os_set_capacity(zv, volsize >> 9);
zv->zv_volsize = volsize;
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
zvol_os_set_disk_ro(zv, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
zvol_os_set_disk_ro(zv, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
return (0);
}
/*
* Shut down everything related to zv_objset except zv_objset itself.
* This is the reverse of zvol_setup_zv.
*/
static void
zvol_shutdown_zv(zvol_state_t *zv)
{
ASSERT(MUTEX_HELD(&zv->zv_state_lock) &&
RW_LOCK_HELD(&zv->zv_suspend_lock));
if (zv->zv_flags & ZVOL_WRITTEN_TO) {
ASSERT(zv->zv_zilog != NULL);
zil_close(zv->zv_zilog);
}
zv->zv_zilog = NULL;
dnode_rele(zv->zv_dn, zv);
zv->zv_dn = NULL;
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
if (zv->zv_flags & ZVOL_WRITTEN_TO)
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
(void) dmu_objset_evict_dbufs(zv->zv_objset);
}
/*
* Return the proper tag for rollback and recv.
*/
void *
zvol_tag(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
return (zv->zv_open_count > 0 ? zv : NULL);
}
/*
* Suspend the zvol for recv and rollback.
*/
zvol_state_t *
zvol_suspend(const char *name)
{
zvol_state_t *zv;
zv = zvol_find_by_name(name, RW_WRITER);
if (zv == NULL)
return (NULL);
/* block all I/O, release in zvol_resume. */
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
atomic_inc(&zv->zv_suspend_ref);
if (zv->zv_open_count > 0)
zvol_shutdown_zv(zv);
/*
* do not hold zv_state_lock across suspend/resume to
* avoid locking up zvol lookups
*/
mutex_exit(&zv->zv_state_lock);
/* zv_suspend_lock is released in zvol_resume() */
return (zv);
}
int
zvol_resume(zvol_state_t *zv)
{
int error = 0;
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_open_count > 0) {
VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
dmu_objset_rele(zv->zv_objset, zv);
error = zvol_setup_zv(zv);
}
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
/*
* We need this because we don't hold zvol_state_lock while releasing
* zv_suspend_lock. zvol_remove_minors_impl() thus cannot check
* zv_suspend_lock to determine whether it is safe to free the zvol,
* because an rwlock is not inherently atomic.
*/
atomic_dec(&zv->zv_suspend_ref);
return (SET_ERROR(error));
}
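/*
 * Illustrative pairing sketch (hypothetical, not part of this change):
 * recv and rollback bracket the dataset operation with
 * zvol_suspend()/zvol_resume().
 */
#if 0
zvol_state_t *zv = zvol_suspend(name);
/* ... perform the rollback or receive ... */
if (zv != NULL)
(void) zvol_resume(zv); /* drops zv_suspend_lock */
#endif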
int
zvol_first_open(zvol_state_t *zv, boolean_t readonly)
{
objset_t *os;
int error;
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(mutex_owned(&spa_namespace_lock));
boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
if (error)
return (SET_ERROR(error));
zv->zv_objset = os;
error = zvol_setup_zv(zv);
if (error) {
dmu_objset_disown(os, 1, zv);
zv->zv_objset = NULL;
}
return (error);
}
void
zvol_last_close(zvol_state_t *zv)
{
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zvol_shutdown_zv(zv);
dmu_objset_disown(zv->zv_objset, 1, zv);
zv->zv_objset = NULL;
}
typedef struct minors_job {
list_t *list;
list_node_t link;
/* input */
char *name;
/* output */
int error;
} minors_job_t;
/*
* Prefetch zvol dnodes for the minors_job
*/
static void
zvol_prefetch_minors_impl(void *arg)
{
minors_job_t *job = arg;
char *dsname = job->name;
objset_t *os = NULL;
job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, B_TRUE,
FTAG, &os);
if (job->error == 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
dmu_objset_disown(os, B_TRUE, FTAG);
}
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
minors_job_t *j = arg;
list_t *minors_list = j->list;
const char *name = j->name;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
/* skip the designated dataset */
if (name && strcmp(dsname, name) == 0)
return (0);
/* at this point, the dsname should name a snapshot */
if (strchr(dsname, '@') == 0) {
dprintf("zvol_create_snap_minor_cb(): "
"%s is not a snapshot name\n", dsname);
} else {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
}
return (0);
}
/*
* If spa_keystore_load_wkey() is called for an encrypted zvol,
* we need to look for any clones also using the key. This function
* is "best effort" - so we just skip over it if there are failures.
*/
static void
zvol_add_clones(const char *dsname, list_t *minors_list)
{
/* Also check if it has clones */
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
if (dsl_pool_hold(dsname, FTAG, &dp) != 0)
return;
if (!spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_ENCRYPTION))
goto out;
if (dsl_dir_hold(dp, dsname, FTAG, &dd, NULL) != 0)
goto out;
if (dsl_dir_phys(dd)->dd_clones == 0)
goto out;
zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
objset_t *mos = dd->dd_pool->dp_meta_objset;
for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
minors_job_t *job;
if (dsl_dataset_hold_obj(dd->dd_pool,
za->za_first_integer, FTAG, &clone) == 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(clone, name);
char *n = kmem_strdup(name);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
dsl_dataset_rele(clone, FTAG);
}
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
out:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
uint64_t snapdev;
int error;
list_t *minors_list = arg;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
if (error)
return (0);
/*
* Given the name and the 'snapdev' property, create device minor nodes
* with the linkages to zvols/snapshots as needed.
* If the name represents a zvol, create a minor node for the zvol, then
* check if its snapshots are 'visible', and if so, iterate over the
* snapshots and create device minor nodes for those.
*/
if (strchr(dsname, '@') == 0) {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
zvol_add_clones(dsname, minors_list);
if (snapdev == ZFS_SNAPDEV_VISIBLE) {
/*
* traverse snapshots only, do not traverse children,
* and skip the 'dsname'
*/
(void) dmu_objset_find(dsname,
zvol_create_snap_minor_cb, (void *)job,
DS_FIND_SNAPSHOTS);
}
} else {
dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
dsname);
}
return (0);
}
/*
* Create minors for the specified dataset, including children and snapshots.
* Pay attention to the 'snapdev' property and iterate over the snapshots
* only if they are 'visible'. This approach allows one to ensure that the
* snapshot metadata is read from disk only if it is needed.
*
* The name can represent a dataset to be recursively scanned for zvols and
* their snapshots, or a single zvol snapshot. If the name represents a
* dataset, the scan is performed in two nested stages:
* - scan the dataset for zvols, and
* - for each zvol, create a minor node, then check if the zvol's snapshots
* are 'visible', and only then iterate over the snapshots if needed
*
* If the name represents a snapshot, a check is performed if the snapshot is
* 'visible' (which also verifies that the parent is a zvol), and if so,
* a minor node for that snapshot is created.
*/
void
zvol_create_minors_recursive(const char *name)
{
list_t minors_list;
minors_job_t *job;
if (zvol_inhibit_dev)
return;
/*
* This is the list for prefetch jobs. Whenever we find a match
* during dmu_objset_find(), we insert a minors_job into the list and
* use taskq_dispatch() to prefetch zvol dnodes in parallel. Note that
* we don't need any locks because all list operations are done on the
* current thread.
*
* We will use this list to do zvol_os_create_minor after prefetch
* so we don't have to traverse using dmu_objset_find again.
*/
list_create(&minors_list, sizeof (minors_job_t),
offsetof(minors_job_t, link));
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name, "snapdev",
&snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) zvol_os_create_minor(name);
} else {
fstrans_cookie_t cookie = spl_fstrans_mark();
(void) dmu_objset_find(name, zvol_create_minors_cb,
&minors_list, DS_FIND_CHILDREN);
spl_fstrans_unmark(cookie);
}
taskq_wait_outstanding(system_taskq, 0);
/*
* Prefetch is completed, we can do zvol_os_create_minor
* sequentially.
*/
while ((job = list_remove_head(&minors_list)) != NULL) {
if (!job->error)
(void) zvol_os_create_minor(job->name);
kmem_strfree(job->name);
kmem_free(job, sizeof (minors_job_t));
}
list_destroy(&minors_list);
}
void
zvol_create_minor(const char *name)
{
/*
* Note: the dsl_pool_config_lock must not be held.
* Minor node creation needs to obtain the zvol_state_lock.
* zvol_open() obtains the zvol_state_lock and then the dsl pool
* config lock. Therefore, we can't have the config lock now if
* we are going to wait for the zvol_state_lock, because it
* would be a lock order inversion which could lead to deadlock.
*/
if (zvol_inhibit_dev)
return;
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name,
"snapdev", &snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) zvol_os_create_minor(name);
} else {
(void) zvol_os_create_minor(name);
}
}
/*
* Remove minors for specified dataset including children and snapshots.
*/
static void
zvol_free_task(void *arg)
{
zvol_os_free(arg);
}
void
zvol_remove_minors_impl(const char *name)
{
zvol_state_t *zv, *zv_next;
int namelen = ((name) ? strlen(name) : 0);
taskqid_t t;
list_t free_list;
if (zvol_inhibit_dev)
return;
list_create(&free_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
(strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
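/*
 * For example, name "pool/vol" matches "pool/vol" itself,
 * "pool/vol@snap" and "pool/vol/child", but not "pool/volume".
 */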
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zvol_os_clear_private(zv);
/* Drop zv_state_lock before zvol_free() */
mutex_exit(&zv->zv_state_lock);
/* Try parallel zv_free, if failed do it in place */
t = taskq_dispatch(system_taskq, zvol_free_task, zv,
TQ_SLEEP);
if (t == TASKQID_INVALID)
list_insert_head(&free_list, zv);
} else {
mutex_exit(&zv->zv_state_lock);
}
}
rw_exit(&zvol_state_lock);
/* Drop zvol_state_lock before calling zvol_free() */
while ((zv = list_remove_head(&free_list)) != NULL)
zvol_os_free(zv);
}
/* Remove minor for this specific volume only */
static void
zvol_remove_minor_impl(const char *name)
{
zvol_state_t *zv = NULL, *zv_next;
if (zvol_inhibit_dev)
return;
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, name) == 0) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
zvol_os_clear_private(zv);
mutex_exit(&zv->zv_state_lock);
break;
} else {
mutex_exit(&zv->zv_state_lock);
}
}
/* Drop zvol_state_lock before calling zvol_free() */
rw_exit(&zvol_state_lock);
if (zv != NULL)
zvol_os_free(zv);
}
/*
* Rename minors for specified dataset including children and snapshots.
*/
static void
zvol_rename_minors_impl(const char *oldname, const char *newname)
{
zvol_state_t *zv, *zv_next;
int oldnamelen;
if (zvol_inhibit_dev)
return;
oldnamelen = strlen(oldname);
rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, oldname) == 0) {
zvol_os_rename_minor(zv, newname);
} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
(zv->zv_name[oldnamelen] == '/' ||
zv->zv_name[oldnamelen] == '@')) {
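/* e.g., renaming "p/old" to "p/new" maps "p/old@snap" to "p/new@snap" */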
char *name = kmem_asprintf("%s%c%s", newname,
zv->zv_name[oldnamelen],
zv->zv_name + oldnamelen + 1);
zvol_os_rename_minor(zv, name);
kmem_strfree(name);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
}
typedef struct zvol_snapdev_cb_arg {
uint64_t snapdev;
} zvol_snapdev_cb_arg_t;
static int
zvol_set_snapdev_cb(const char *dsname, void *param)
{
zvol_snapdev_cb_arg_t *arg = param;
if (strchr(dsname, '@') == NULL)
return (0);
switch (arg->snapdev) {
case ZFS_SNAPDEV_VISIBLE:
(void) zvol_os_create_minor(dsname);
break;
case ZFS_SNAPDEV_HIDDEN:
(void) zvol_remove_minor_impl(dsname);
break;
}
return (0);
}
static void
zvol_set_snapdev_impl(char *name, uint64_t snapdev)
{
zvol_snapdev_cb_arg_t arg = {snapdev};
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* The zvol_set_snapdev_sync() sets snapdev appropriately
* in the dataset hierarchy. Here, we only scan snapshots.
*/
dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
spl_fstrans_unmark(cookie);
}
static void
zvol_set_volmode_impl(char *name, uint64_t volmode)
{
fstrans_cookie_t cookie;
uint64_t old_volmode;
zvol_state_t *zv;
if (strchr(name, '@') != NULL)
return;
/*
* It's unfortunate we need to remove minors before we create new ones:
* this is necessary because our backing gendisk (zvol_state->zv_disk)
* could be different when we set, for instance, volmode from "geom"
* to "dev" (or vice versa).
*/
zv = zvol_find_by_name(name, RW_NONE);
if (zv == NULL && volmode == ZFS_VOLMODE_NONE)
return;
if (zv != NULL) {
old_volmode = zv->zv_volmode;
mutex_exit(&zv->zv_state_lock);
if (old_volmode == volmode)
return;
zvol_wait_close(zv);
}
cookie = spl_fstrans_mark();
switch (volmode) {
case ZFS_VOLMODE_NONE:
(void) zvol_remove_minor_impl(name);
break;
case ZFS_VOLMODE_GEOM:
case ZFS_VOLMODE_DEV:
(void) zvol_remove_minor_impl(name);
(void) zvol_os_create_minor(name);
break;
case ZFS_VOLMODE_DEFAULT:
(void) zvol_remove_minor_impl(name);
if (zvol_volmode == ZFS_VOLMODE_NONE)
break;
else /* if zvol_volmode is invalid defaults to "geom" */
(void) zvol_os_create_minor(name);
break;
}
spl_fstrans_unmark(cookie);
}
static zvol_task_t *
zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
uint64_t value)
{
zvol_task_t *task;
/* Never allow tasks on hidden names. */
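/* (ZFS-internal datasets such as "$ORIGIN" begin with '$') */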
if (name1[0] == '$')
return (NULL);
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->op = op;
task->value = value;
strlcpy(task->name1, name1, MAXNAMELEN);
if (name2 != NULL)
strlcpy(task->name2, name2, MAXNAMELEN);
return (task);
}
static void
zvol_task_free(zvol_task_t *task)
{
kmem_free(task, sizeof (zvol_task_t));
}
/*
* The worker function, executed asynchronously on the spa_zvol_taskq.
*/
static void
zvol_task_cb(void *arg)
{
zvol_task_t *task = arg;
switch (task->op) {
case ZVOL_ASYNC_REMOVE_MINORS:
zvol_remove_minors_impl(task->name1);
break;
case ZVOL_ASYNC_RENAME_MINORS:
zvol_rename_minors_impl(task->name1, task->name2);
break;
case ZVOL_ASYNC_SET_SNAPDEV:
zvol_set_snapdev_impl(task->name1, task->value);
break;
case ZVOL_ASYNC_SET_VOLMODE:
zvol_set_volmode_impl(task->name1, task->value);
break;
default:
VERIFY(0);
break;
}
zvol_task_free(task);
}
typedef struct zvol_set_prop_int_arg {
const char *zsda_name;
uint64_t zsda_value;
zprop_source_t zsda_source;
dmu_tx_t *zsda_tx;
} zvol_set_prop_int_arg_t;
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) arg;
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t snapdev;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply snapdev appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "snapdev" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
int
zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = snapdev;
return (dsl_sync_task(ddname, zvol_set_snapdev_check,
zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_volmode_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
static int
zvol_set_volmode_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) arg;
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t volmode;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "volmode", &volmode) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_VOLMODE, dsname, NULL, volmode);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply volmode appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "volmode" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_volmode_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_VOLMODE),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_volmode_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
int
zvol_set_volmode(const char *ddname, zprop_source_t source, uint64_t volmode)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = volmode;
return (dsl_sync_task(ddname, zvol_set_volmode_check,
zvol_set_volmode_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
void
zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
boolean_t
zvol_is_zvol(const char *name)
{
return (zvol_os_is_zvol(name));
}
int
zvol_init_impl(void)
{
int i;
list_create(&zvol_state_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_init(&zvol_state_lock, NULL, RW_DEFAULT, NULL);
zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
KM_SLEEP);
for (i = 0; i < ZVOL_HT_SIZE; i++)
INIT_HLIST_HEAD(&zvol_htable[i]);
return (0);
}
void
zvol_fini_impl(void)
{
zvol_remove_minors_impl(NULL);
/*
* The call to "zvol_remove_minors_impl" may dispatch entries to
* the system_taskq, but it doesn't wait for those entries to
* complete before it returns. Thus, we must wait for all of the
* removals to finish before we can continue.
*/
taskq_wait_outstanding(system_taskq, 0);
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
}
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index 618eeb934017..2c8d5cb0ecbb 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -1,210 +1,221 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/posix:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/acl/posix-sa:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix-sa']
[tests/functional/atime:Linux]
tests = ['atime_003_pos', 'root_relatime_on']
tags = ['functional', 'atime']
+[tests/functional/block_cloning:Linux]
+tests = ['block_cloning_copyfilerange', 'block_cloning_copyfilerange_partial',
+ 'block_cloning_copyfilerange_fallback',
+ 'block_cloning_ficlone', 'block_cloning_ficlonerange',
+ 'block_cloning_ficlonerange_partial',
+ 'block_cloning_disabled_copyfilerange', 'block_cloning_disabled_ficlone',
+ 'block_cloning_disabled_ficlonerange',
+ 'block_cloning_copyfilerange_cross_dataset',
+ 'block_cloning_copyfilerange_fallback_same_txg']
+tags = ['functional', 'block_cloning']
+
[tests/functional/chattr:Linux]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/cli_root/zfs:Linux]
tests = ['zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_mount:Linux]
tests = ['zfs_mount_006_pos', 'zfs_mount_008_pos', 'zfs_mount_013_pos',
'zfs_mount_014_neg', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_share:Linux]
tests = ['zfs_share_005_pos', 'zfs_share_007_neg', 'zfs_share_009_neg',
'zfs_share_012_pos', 'zfs_share_013_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_unshare:Linux]
tests = ['zfs_unshare_008_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_sysfs:Linux]
tests = ['zfeature_set_unsupported', 'zfs_get_unsupported',
'zfs_set_unsupported', 'zfs_sysfs_live', 'zpool_get_unsupported',
'zpool_set_unsupported']
tags = ['functional', 'cli_root', 'zfs_sysfs']
[tests/functional/cli_root/zpool_add:Linux]
tests = ['add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_expand:Linux]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_reopen:Linux]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg', 'zpool_reopen_007_pos']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_split:Linux]
tests = ['zpool_split_wholedisk']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/compression:Linux]
tests = ['compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/devices:Linux]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill',
'zed_cksum_reported', 'zed_cksum_config', 'zed_io_config']
tags = ['functional', 'events']
[tests/functional/fadvise:Linux]
tests = ['fadvise_sequential']
tags = ['functional', 'fadvise']
[tests/functional/fallocate:Linux]
tests = ['fallocate_prealloc', 'fallocate_zero-range']
tags = ['functional', 'fallocate']
[tests/functional/fault:Linux]
tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_spare_001_pos', 'auto_spare_002_pos',
'auto_spare_multiple', 'auto_spare_ashift', 'auto_spare_shared',
'decrypt_fault', 'decompress_fault', 'scrub_after_resilver',
'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]
tests = ['large_dnode_002_pos', 'large_dnode_006_pos', 'large_dnode_008_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/io:Linux]
tests = ['libaio', 'io_uring']
tags = ['functional', 'io']
[tests/functional/largest_pool:Linux]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/mmap:Linux]
tests = ['mmap_libaio_001_pos', 'mmap_sync_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp:Linux]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history',
'mmp_on_zdb', 'mmp_write_distribution', 'mmp_hostid']
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
tests = ['umount_unlinked_drain']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
tests = ['pam_basic', 'pam_change_unmounted', 'pam_nounmount', 'pam_recursive',
'pam_short_password']
tags = ['functional', 'pam']
[tests/functional/procfs:Linux]
tests = ['procfs_list_basic', 'procfs_list_concurrent_readers',
'procfs_list_stale_read', 'pool_state']
tags = ['functional', 'procfs']
[tests/functional/projectquota:Linux]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg']
tags = ['functional', 'projectquota']
[tests/functional/dos_attributes:Linux]
tests = ['read_dos_attrs_001', 'write_dos_attrs_001']
tags = ['functional', 'dos_attributes']
[tests/functional/renameat2:Linux]
tests = ['renameat2_noreplace', 'renameat2_exchange', 'renameat2_whiteout']
tags = ['functional', 'renameat2']
[tests/functional/rsend:Linux]
tests = ['send_realloc_dnode_size', 'send_encrypted_files']
tags = ['functional', 'rsend']
[tests/functional/simd:Linux]
pre =
post =
tests = ['simd_supported']
tags = ['functional', 'simd']
[tests/functional/snapshot:Linux]
tests = ['snapshot_015_pos', 'snapshot_016_pos']
tags = ['functional', 'snapshot']
[tests/functional/tmpfile:Linux]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos',
'tmpfile_stat_mode']
tags = ['functional', 'tmpfile']
[tests/functional/upgrade:Linux]
tests = ['upgrade_projectquota_001_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace:Linux]
tests = ['user_namespace_001', 'user_namespace_002', 'user_namespace_003',
'user_namespace_004']
tags = ['functional', 'user_namespace']
[tests/functional/userquota:Linux]
tests = ['groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos',
'userquota_013_pos', 'userspace_003_pos']
tags = ['functional', 'userquota']
[tests/functional/zvol/zvol_misc:Linux]
tests = ['zvol_misc_fua']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/idmap_mount:Linux]
tests = ['idmap_mount_001', 'idmap_mount_002', 'idmap_mount_003',
'idmap_mount_004', 'idmap_mount_005']
tags = ['functional', 'idmap_mount']
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index cf438e0e6495..e1bbe063ab4c 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -1,458 +1,476 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# This script must remain compatible with Python 3.6+.
#
import os
import re
import sys
import argparse
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require that the kernel supports renameat2 syscall.
#
renameat2_reason = 'Kernel renameat2 support required'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require the statx(2) system call on Linux which was first
# introduced in the 4.11 kernel.
#
statx_reason = 'Kernel statx(2) system call required on Linux'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'
#
# Some tests require that the DISKS provided support the discard operation.
# Normally this is not an issue because loop back devices are used for DISKS
# and they support discard (TRIM/UNMAP).
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'
#
# Some tests on FreeBSD require the fspacectl(2) system call and the
# truncate(1) utility supporting the -d option. The system call was first
# introduced in FreeBSD version 1400032.
#
fspacectl_reason = 'fspacectl(2) and truncate -d support required'
#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"
#
# Some test cases don't have all the requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'
#
# Idmapped mount is only supported in kernel version >= 5.12
#
idmap_reason = 'Idmapped mount needs kernel 5.12+'
+#
+# copy_file_range() is not supported by all kernels
+#
+cfr_reason = 'Kernel copy_file_range support required'
+cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs kernel 5.3+'
+
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
'casenorm/mixed_none_lookup_ci': ['FAIL', 7633],
'casenorm/mixed_formd_lookup_ci': ['FAIL', 7633],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
'pool_checkpoint/checkpoint_discard_busy': ['SKIP', 12053],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', 6066],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}
if sys.platform.startswith('freebsd'):
known.update({
'cli_root/zfs_receive/receive-o-x_props_override':
['FAIL', known_reason],
'cli_root/zpool_resilver/zpool_resilver_concurrent':
['SKIP', na_reason],
'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
'cli_root/zfs_unshare/zfs_unshare_008_pos': ['SKIP', na_reason],
'link_count/link_count_001': ['SKIP', na_reason],
'casenorm/mixed_create_failure': ['FAIL', 13215],
'mmap/mmap_sync_001_pos': ['SKIP', na_reason],
'rsend/send_raw_ashift': ['SKIP', 14961],
})
elif sys.platform.startswith('linux'):
known.update({
'casenorm/mixed_formd_lookup': ['FAIL', 7633],
'casenorm/mixed_formd_delete': ['FAIL', 7633],
'casenorm/sensitive_formd_lookup': ['FAIL', 7633],
'casenorm/sensitive_formd_delete': ['FAIL', 7633],
'removal/removal_with_zdb': ['SKIP', known_reason],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
})
#
# These tests may occasionally fail or be skipped. We want those failures
# to be reported but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
maybe = {
'append/threadsappend_001_pos': ['FAIL', 6136],
'chattr/setup': ['SKIP', exec_reason],
'crtime/crtime_001_pos': ['SKIP', statx_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense':
['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', 5479],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', 6145],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', 6839],
'cli_root/zpool_initialize/zpool_initialize_import_export':
['FAIL', 11948],
'cli_root/zpool_labelclear/zpool_labelclear_removed':
['FAIL', known_reason],
'cli_root/zpool_trim/setup': ['SKIP', trim_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', 6141],
'delegate/setup': ['SKIP', exec_reason],
'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason],
'history/history_004_pos': ['FAIL', 7026],
'history/history_005_neg': ['FAIL', 6680],
'history/history_006_neg': ['FAIL', 5657],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'io/mmap': ['SKIP', fio_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'mmp/mmp_on_uberblocks': ['FAIL', known_reason],
'pam/setup': ['SKIP', "pamtester might be not available"],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', 11946],
'projectquota/setup': ['SKIP', exec_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'renameat2/setup': ['SKIP', renameat2_reason],
'reservation/reservation_008_pos': ['FAIL', 7741],
'reservation/reservation_018_pos': ['FAIL', 5642],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapshot/snapshot_009_pos': ['FAIL', 7961],
'snapshot/snapshot_010_pos': ['FAIL', 7961],
'snapused/snapused_004_pos': ['FAIL', 5513],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'trim/setup': ['SKIP', trim_reason],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', known_reason],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', 5848],
}
if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares':
['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
'inheritance/inherit_001_pos': ['FAIL', 11829],
'resilver/resilver_restart_001': ['FAIL', known_reason],
'pool_checkpoint/checkpoint_big_rewind': ['FAIL', 12622],
'pool_checkpoint/checkpoint_indirect': ['FAIL', 12623],
'snapshot/snapshot_002_pos': ['FAIL', '14831'],
})
elif sys.platform.startswith('linux'):
maybe.update({
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'fault/auto_online_002_pos': ['FAIL', 11889],
'fault/auto_replace_001_pos': ['FAIL', 14851],
'fault/auto_spare_002_pos': ['FAIL', 11889],
'fault/auto_spare_multiple': ['FAIL', 11889],
'fault/auto_spare_shared': ['FAIL', 11889],
'fault/decompress_fault': ['FAIL', 11889],
'io/io_uring': ['SKIP', 'io_uring support required'],
'limits/filesystem_limit': ['SKIP', known_reason],
'limits/snapshot_limit': ['SKIP', known_reason],
'mmp/mmp_active_import': ['FAIL', known_reason],
'mmp/mmp_exported_import': ['FAIL', known_reason],
'mmp/mmp_inactive_import': ['FAIL', known_reason],
'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', 12621],
'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
'zvol/zvol_misc/zvol_misc_fua': ['SKIP', 14872],
'zvol/zvol_misc/zvol_misc_trim': ['SKIP', 14872],
'idmap_mount/idmap_mount_001': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_002': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_003': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_004': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_005': ['SKIP', idmap_reason],
+ 'block_cloning/block_cloning_disabled_copyfilerange':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange_partial':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange_fallback':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange_cross_dataset':
+ ['SKIP', cfr_cross_reason],
+ 'block_cloning/block_cloning_copyfilerange_fallback_same_txg':
+ ['SKIP', cfr_cross_reason],
})
# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests which use it.
if os.environ.get('CI') == 'true':
known.update({
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
'fault/auto_offline_001_pos': ['SKIP', ci_reason],
'fault/auto_online_001_pos': ['SKIP', ci_reason],
'fault/auto_online_002_pos': ['SKIP', ci_reason],
'fault/auto_replace_001_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})
maybe.update({
'events/events_002_pos': ['FAIL', 11546],
})
def process_results(pathname):
try:
f = open(pathname)
except IOError as e:
print('Error opening file:', e)
sys.exit(1)
prefix = '/zfs-tests/tests/(?:functional|perf/regression)/'
pattern = \
r'^Test(?:\s+\(\S+\))?:' + \
rf'\s*\S*{prefix}(\S+)' + \
r'\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]'
pattern_log = r'^\s*Log directory:\s*(\S*)'
d = {}
logdir = 'Could not determine log directory.'
for line in f.readlines():
m = re.match(pattern, line)
if m and len(m.groups()) == 4:
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, line)
if m:
logdir = m.group(1)
return d, logdir
class ListMaybesAction(argparse.Action):
def __init__(self,
option_strings,
dest="SUPPRESS",
default="SUPPRESS",
help="list flaky tests and exit"):
super(ListMaybesAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
for test in maybe:
print(test)
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze ZTS logs')
parser.add_argument('logfile')
parser.add_argument('--list-maybes', action=ListMaybesAction)
parser.add_argument('--no-maybes', action='store_false', dest='maybes')
args = parser.parse_args()
results, logdir = process_results(args.logfile)
if not results:
print("\n\nNo test results were found.")
print("Log directory:", logdir)
sys.exit(0)
expected = []
unexpected = []
all_maybes = True
for test in list(results.keys()):
if results[test] == "PASS":
continue
setup = test.replace(os.path.basename(test), "setup")
if results[test] == "SKIP" and test != setup:
if setup in known and known[setup][0] == "SKIP":
continue
if setup in maybe and maybe[setup][0] == "SKIP":
continue
if (test in known and results[test] in known[test][0]):
expected.append(test)
elif test in maybe and results[test] in maybe[test][0]:
if results[test] == 'SKIP' or args.maybes:
expected.append(test)
elif not args.maybes:
unexpected.append(test)
else:
unexpected.append(test)
all_maybes = False
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/openzfs/zfs/issues/'
# Include the reason why the result is expected, given the following:
# 1. Suppress test results which set the "Not applicable" reason.
# 2. Numerical reasons are assumed to be GitHub issue numbers.
# 3. When an entire test group is skipped only report the setup reason.
if test in known:
if known[test][1] == na_reason:
continue
elif isinstance(known[test][1], int):
expect = f"{issue_url}{known[test][1]}"
else:
expect = known[test][1]
elif test in maybe:
if isinstance(maybe[test][1], int):
expect = f"{issue_url}{maybe[test][1]}"
else:
expect = maybe[test][1]
elif setup in known and known[setup][0] == "SKIP" and setup != test:
continue
elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
continue
else:
expect = "UNKNOWN REASON"
print(f" {results[test]} {test} ({expect})")
print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
print(f" {results[test]} {test} (expected {known[test][0]})")
print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
print(f" {results[test]} {test} (expected {expect})")
if len(unexpected) == 0:
sys.exit(0)
elif not args.maybes and all_maybes:
sys.exit(2)
else:
sys.exit(1)
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
index f68f58072818..5f53b687191a 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
@@ -1,51 +1,52 @@
/badsend
/btree_test
/chg_usr_exec
+/clonefile
/devname2devid
/dir_rd_update
/draid
/file_fadvise
/file_append
/file_check
/file_trunc
/file_write
/get_diff
/getversion
/largest_file
/libzfs_input_check
/mkbusy
/mkfile
/mkfiles
/mktree
/mmap_exec
/mmap_libaio
/mmap_seek
/mmap_sync
/mmapwrite
/nvlist_to_lua
/randfree_file
/randwritecomp
/read_dos_attributes
/readmmap
/renameat2
/rename_dir
/rm_lnkcnt_zero_file
/send_doall
/stride_dd
/threadsappend
/user_ns_exec
/write_dos_attributes
/xattrtest
/zed_fd_spill-zedlet
/suid_write_to_file
/cp_files
/ctime
/truncate_test
/ereports
/zfs_diff-socket
/dosmode_readonly_write
/blake3_test
/edonr_test
/skein_test
/sha2_test
/idmap_util
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
index 066abb6ce3b5..9bdb3c209756 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
@@ -1,135 +1,136 @@
scripts_zfs_tests_bindir = $(datadir)/$(PACKAGE)/zfs-tests/bin
scripts_zfs_tests_bin_PROGRAMS = %D%/chg_usr_exec
scripts_zfs_tests_bin_PROGRAMS += %D%/cp_files
scripts_zfs_tests_bin_PROGRAMS += %D%/ctime
scripts_zfs_tests_bin_PROGRAMS += %D%/dir_rd_update
scripts_zfs_tests_bin_PROGRAMS += %D%/dosmode_readonly_write
scripts_zfs_tests_bin_PROGRAMS += %D%/get_diff
scripts_zfs_tests_bin_PROGRAMS += %D%/rename_dir
scripts_zfs_tests_bin_PROGRAMS += %D%/suid_write_to_file
scripts_zfs_tests_bin_PROGRAMS += %D%/truncate_test
scripts_zfs_tests_bin_PROGRAMS += %D%/zfs_diff-socket
scripts_zfs_tests_bin_PROGRAMS += %D%/badsend
%C%_badsend_LDADD = \
libzfs_core.la \
libzfs.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/btree_test
%C%_btree_test_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
%C%_btree_test_LDADD = \
libzpool.la \
libzfs_core.la
if WANT_DEVNAME2DEVID
scripts_zfs_tests_bin_PROGRAMS += %D%/devname2devid
%C%_devname2devid_CFLAGS = $(AM_CFLAGS) $(LIBUDEV_CFLAGS)
%C%_devname2devid_LDADD = $(LIBUDEV_LIBS)
endif
scripts_zfs_tests_bin_PROGRAMS += %D%/draid
%C%_draid_CFLAGS = $(AM_CFLAGS) $(ZLIB_CFLAGS)
%C%_draid_LDADD = \
libzpool.la \
libnvpair.la
%C%_draid_LDADD += $(ZLIB_LIBS)
dist_noinst_DATA += %D%/file/file_common.h
scripts_zfs_tests_bin_PROGRAMS += %D%/file_append %D%/file_check %D%/file_trunc %D%/file_write %D%/largest_file %D%/randwritecomp
%C%_file_append_SOURCES = %D%/file/file_append.c
%C%_file_check_SOURCES = %D%/file/file_check.c
%C%_file_trunc_SOURCES = %D%/file/file_trunc.c
%C%_file_write_SOURCES = %D%/file/file_write.c
%C%_largest_file_SOURCES = %D%/file/largest_file.c
%C%_randwritecomp_SOURCES = %D%/file/randwritecomp.c
scripts_zfs_tests_bin_PROGRAMS += %D%/libzfs_input_check
%C%_libzfs_input_check_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/include/os/@ac_system_l@/zfs
%C%_libzfs_input_check_LDADD = \
libzfs_core.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/mkbusy %D%/mkfile %D%/mkfiles %D%/mktree
%C%_mkfile_LDADD = $(LTLIBINTL)
scripts_zfs_tests_bin_PROGRAMS += %D%/mmap_exec %D%/mmap_seek %D%/mmap_sync %D%/mmapwrite %D%/readmmap
%C%_mmapwrite_LDADD = -lpthread
if WANT_MMAP_LIBAIO
scripts_zfs_tests_bin_PROGRAMS += %D%/mmap_libaio
%C%_mmap_libaio_CFLAGS = $(AM_CFLAGS) $(LIBAIO_CFLAGS)
%C%_mmap_libaio_LDADD = $(LIBAIO_LIBS)
endif
scripts_zfs_tests_bin_PROGRAMS += %D%/nvlist_to_lua
%C%_nvlist_to_lua_LDADD = \
libzfs_core.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/rm_lnkcnt_zero_file
%C%_rm_lnkcnt_zero_file_LDADD = -lpthread
scripts_zfs_tests_bin_PROGRAMS += %D%/send_doall
%C%_send_doall_LDADD = \
libzfs_core.la \
libzfs.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/stride_dd
%C%_stride_dd_LDADD = -lrt
scripts_zfs_tests_bin_PROGRAMS += %D%/threadsappend
%C%_threadsappend_LDADD = -lpthread
scripts_zfs_tests_bin_PROGRAMS += %D%/ereports
%C%_ereports_LDADD = \
libnvpair.la \
libzfs.la
scripts_zfs_tests_bin_PROGRAMS += %D%/edonr_test %D%/skein_test \
%D%/sha2_test %D%/blake3_test
%C%_skein_test_SOURCES = %D%/checksum/skein_test.c
%C%_sha2_test_SOURCES = %D%/checksum/sha2_test.c
%C%_edonr_test_SOURCES = %D%/checksum/edonr_test.c
%C%_blake3_test_SOURCES = %D%/checksum/blake3_test.c
%C%_skein_test_LDADD = \
libicp.la \
libspl.la \
libspl_assert.la
%C%_sha2_test_LDADD = $(%C%_skein_test_LDADD)
%C%_edonr_test_LDADD = $(%C%_skein_test_LDADD)
%C%_blake3_test_LDADD = $(%C%_skein_test_LDADD)
if BUILD_LINUX
scripts_zfs_tests_bin_PROGRAMS += %D%/getversion
scripts_zfs_tests_bin_PROGRAMS += %D%/user_ns_exec
scripts_zfs_tests_bin_PROGRAMS += %D%/renameat2
scripts_zfs_tests_bin_PROGRAMS += %D%/xattrtest
scripts_zfs_tests_bin_PROGRAMS += %D%/zed_fd_spill-zedlet
scripts_zfs_tests_bin_PROGRAMS += %D%/idmap_util
+scripts_zfs_tests_bin_PROGRAMS += %D%/clonefile
%C%_idmap_util_LDADD = libspl.la
dist_noinst_DATA += %D%/linux_dos_attributes/dos_attributes.h
scripts_zfs_tests_bin_PROGRAMS += %D%/read_dos_attributes %D%/write_dos_attributes
%C%_read_dos_attributes_SOURCES = %D%/linux_dos_attributes/read_dos_attributes.c
%C%_write_dos_attributes_SOURCES = %D%/linux_dos_attributes/write_dos_attributes.c
scripts_zfs_tests_bin_PROGRAMS += %D%/randfree_file
%C%_randfree_file_SOURCES = %D%/file/randfree_file.c
scripts_zfs_tests_bin_PROGRAMS += %D%/file_fadvise
%C%_file_fadvise_SOURCES = %D%/file/file_fadvise.c
endif
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c
new file mode 100644
index 000000000000..696dc471d8c3
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c
@@ -0,0 +1,333 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * This program tests the availability and behaviour of copy_file_range,
+ * FICLONE, FICLONERANGE and FIDEDUPERANGE in the Linux kernel. It should
+ * compile and run even if these features aren't exposed through libc.
+ */
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
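+/*
+ * Fall back to hardcoded per-architecture syscall numbers when the kernel
+ * headers are too old to define __NR_copy_file_range, so the raw
+ * syscall(2) path below still works.
+ */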
+#ifndef __NR_copy_file_range
+#if defined(__x86_64__)
+#define __NR_copy_file_range (326)
+#elif defined(__i386__)
+#define __NR_copy_file_range (377)
+#elif defined(__s390__)
+#define __NR_copy_file_range (375)
+#elif defined(__arm__)
+#define __NR_copy_file_range (391)
+#elif defined(__aarch64__)
+#define __NR_copy_file_range (285)
+#elif defined(__powerpc__)
+#define __NR_copy_file_range (379)
+#else
+#error "no definition of __NR_copy_file_range for this platform"
+#endif
+#endif /* __NR_copy_file_range */
+
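+/*
+ * Declared weak so the program still links when libc has no
+ * copy_file_range(2) wrapper; at runtime the symbol is then NULL and
+ * cf_copy_file_range() falls back to a raw syscall.
+ */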
+ssize_t
+copy_file_range(int, loff_t *, int, loff_t *, size_t, unsigned int)
+ __attribute__((weak));
+
+static inline ssize_t
+cf_copy_file_range(int sfd, loff_t *soff, int dfd, loff_t *doff,
+ size_t len, unsigned int flags)
+{
+ if (copy_file_range)
+ return (copy_file_range(sfd, soff, dfd, doff, len, flags));
+ return (
+ syscall(__NR_copy_file_range, sfd, soff, dfd, doff, len, flags));
+}
+
+/* Define missing FICLONE */
+#ifdef FICLONE
+#define CF_FICLONE FICLONE
+#else
+#define CF_FICLONE _IOW(0x94, 9, int)
+#endif
+
+/* Define missing FICLONERANGE and support structs */
+#ifdef FICLONERANGE
+#define CF_FICLONERANGE FICLONERANGE
+typedef struct file_clone_range cf_file_clone_range_t;
+#else
+typedef struct {
+ int64_t src_fd;
+ uint64_t src_offset;
+ uint64_t src_length;
+ uint64_t dest_offset;
+} cf_file_clone_range_t;
+#define CF_FICLONERANGE _IOW(0x94, 13, cf_file_clone_range_t)
+#endif
+
+/* Define missing FIDEDUPERANGE and support structs */
+#ifdef FIDEDUPERANGE
+#define CF_FIDEDUPERANGE FIDEDUPERANGE
+#define CF_FILE_DEDUPE_RANGE_SAME FILE_DEDUPE_RANGE_SAME
+#define CF_FILE_DEDUPE_RANGE_DIFFERS FILE_DEDUPE_RANGE_DIFFERS
+typedef struct file_dedupe_range_info cf_file_dedupe_range_info_t;
+typedef struct file_dedupe_range cf_file_dedupe_range_t;
+#else
+typedef struct {
+ int64_t dest_fd;
+ uint64_t dest_offset;
+ uint64_t bytes_deduped;
+ int32_t status;
+ uint32_t reserved;
+} cf_file_dedupe_range_info_t;
+typedef struct {
+ uint64_t src_offset;
+ uint64_t src_length;
+ uint16_t dest_count;
+ uint16_t reserved1;
+ uint32_t reserved2;
+ cf_file_dedupe_range_info_t info[0];
+} cf_file_dedupe_range_t;
+#define CF_FIDEDUPERANGE _IOWR(0x94, 54, cf_file_dedupe_range_t)
+#define CF_FILE_DEDUPE_RANGE_SAME (0)
+#define CF_FILE_DEDUPE_RANGE_DIFFERS (1)
+#endif
+
+typedef enum {
+ CF_MODE_NONE,
+ CF_MODE_CLONE,
+ CF_MODE_CLONERANGE,
+ CF_MODE_COPYFILERANGE,
+ CF_MODE_DEDUPERANGE,
+} cf_mode_t;
+
+static int
+usage(void)
+{
+ printf(
+ "usage:\n"
+ " FICLONE:\n"
+ " clonefile -c <src> <dst>\n"
+ " FICLONERANGE:\n"
+ " clonefile -r <src> <dst> <soff> <doff> <len>\n"
+ " copy_file_range:\n"
+ " clonefile -f <src> <dst> <soff> <doff> <len>\n"
+ " FIDEDUPERANGE:\n"
+ " clonefile -d <src> <dst> <soff> <doff> <len>\n");
+ return (1);
+}
+
+int do_clone(int sfd, int dfd);
+int do_clonerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len);
+int do_copyfilerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len);
+int do_deduperange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len);
+
+int quiet = 0;
+
+int
+main(int argc, char **argv)
+{
+ cf_mode_t mode = CF_MODE_NONE;
+
+ int c;
+ while ((c = getopt(argc, argv, "crfdq")) != -1) {
+ switch (c) {
+ case 'c':
+ mode = CF_MODE_CLONE;
+ break;
+ case 'r':
+ mode = CF_MODE_CLONERANGE;
+ break;
+ case 'f':
+ mode = CF_MODE_COPYFILERANGE;
+ break;
+ case 'd':
+ mode = CF_MODE_DEDUPERANGE;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ }
+ }
+
+ if (mode == CF_MODE_NONE || (argc-optind) < 2 ||
+ (mode != CF_MODE_CLONE && (argc-optind) < 5))
+ return (usage());
+
+ loff_t soff = 0, doff = 0;
+ size_t len = 0;
+ if (mode != CF_MODE_CLONE) {
+ soff = strtoull(argv[optind+2], NULL, 10);
+ if (soff == ULLONG_MAX) {
+ fprintf(stderr, "invalid source offset");
+ return (1);
+ }
+ doff = strtoull(argv[optind+3], NULL, 10);
+ if (doff == ULLONG_MAX) {
+ fprintf(stderr, "invalid dest offset");
+ return (1);
+ }
+ len = strtoull(argv[optind+4], NULL, 10);
+ if (len == ULLONG_MAX) {
+ fprintf(stderr, "invalid length");
+ return (1);
+ }
+ }
+
+ int sfd = open(argv[optind], O_RDONLY);
+ if (sfd < 0) {
+ fprintf(stderr, "open: %s: %s\n",
+ argv[optind], strerror(errno));
+ return (1);
+ }
+
+ int dfd = open(argv[optind+1], O_WRONLY|O_CREAT,
+ S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
+ if (dfd < 0) {
+ fprintf(stderr, "open: %s: %s\n",
+ argv[optind+1], strerror(errno));
+ close(sfd);
+ return (1);
+ }
+
+ int err;
+ switch (mode) {
+ case CF_MODE_CLONE:
+ err = do_clone(sfd, dfd);
+ break;
+ case CF_MODE_CLONERANGE:
+ err = do_clonerange(sfd, dfd, soff, doff, len);
+ break;
+ case CF_MODE_COPYFILERANGE:
+ err = do_copyfilerange(sfd, dfd, soff, doff, len);
+ break;
+ case CF_MODE_DEDUPERANGE:
+ err = do_deduperange(sfd, dfd, soff, doff, len);
+ break;
+ default:
+ abort();
+ }
+
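+ /*
+ * Report the final offsets and file sizes so callers can see how
+ * the kernel updated them.
+ */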
+ off_t spos = lseek(sfd, 0, SEEK_CUR);
+ off_t slen = lseek(sfd, 0, SEEK_END);
+ off_t dpos = lseek(dfd, 0, SEEK_CUR);
+ off_t dlen = lseek(dfd, 0, SEEK_END);
+
+ fprintf(stderr, "file offsets: src=%lu/%lu; dst=%lu/%lu\n", spos, slen,
+ dpos, dlen);
+
+ close(dfd);
+ close(sfd);
+
+ return (err == 0 ? 0 : 1);
+}
+
+int
+do_clone(int sfd, int dfd)
+{
+ fprintf(stderr, "using FICLONE\n");
+ int err = ioctl(dfd, CF_FICLONE, sfd);
+ if (err < 0) {
+ fprintf(stderr, "ioctl(FICLONE): %s\n", strerror(errno));
+ return (err);
+ }
+ return (0);
+}
+
+int
+do_clonerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len)
+{
+ fprintf(stderr, "using FICLONERANGE\n");
+ cf_file_clone_range_t fcr = {
+ .src_fd = sfd,
+ .src_offset = soff,
+ .src_length = len,
+ .dest_offset = doff,
+ };
+ int err = ioctl(dfd, CF_FICLONERANGE, &fcr);
+ if (err < 0) {
+ fprintf(stderr, "ioctl(FICLONERANGE): %s\n", strerror(errno));
+ return (err);
+ }
+ return (0);
+}
+
+int
+do_copyfilerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len)
+{
+ fprintf(stderr, "using copy_file_range\n");
+ ssize_t copied = cf_copy_file_range(sfd, &soff, dfd, &doff, len, 0);
+ if (copied < 0) {
+ fprintf(stderr, "copy_file_range: %s\n", strerror(errno));
+ return (1);
+ }
+ if ((size_t)copied != len) {
+ fprintf(stderr, "copy_file_range: copied less than requested: "
+ "requested=%zu; copied=%zd\n", len, copied);
+ return (1);
+ }
+ return (0);
+}
+
+int
+do_deduperange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len)
+{
+ fprintf(stderr, "using FIDEDUPERANGE\n");
+
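+ /*
+ * FIDEDUPERANGE takes a file_dedupe_range header immediately
+ * followed by dest_count file_dedupe_range_info records, so build
+ * both in one flat buffer.
+ */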
+ char buf[sizeof (cf_file_dedupe_range_t)+
+ sizeof (cf_file_dedupe_range_info_t)] = {0};
+ cf_file_dedupe_range_t *fdr = (cf_file_dedupe_range_t *)&buf[0];
+ cf_file_dedupe_range_info_t *fdri =
+ (cf_file_dedupe_range_info_t *)
+ &buf[sizeof (cf_file_dedupe_range_t)];
+
+ fdr->src_offset = soff;
+ fdr->src_length = len;
+ fdr->dest_count = 1;
+
+ fdri->dest_fd = dfd;
+ fdri->dest_offset = doff;
+
+ int err = ioctl(sfd, CF_FIDEDUPERANGE, fdr);
+ if (err != 0)
+ fprintf(stderr, "ioctl(FIDEDUPERANGE): %s\n", strerror(errno));
+
+ if (fdri->status < 0) {
+ fprintf(stderr, "dedup failed: %s\n", strerror(-fdri->status));
+ err = -1;
+ } else if (fdri->status == CF_FILE_DEDUPE_RANGE_DIFFERS) {
+ fprintf(stderr, "dedup failed: range differs\n");
+ err = -1;
+ }
+
+ return (err);
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/readmmap.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/readmmap.c
index 704ffd55c8a5..a5c8079d0e46 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/readmmap.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/readmmap.c
@@ -1,138 +1,139 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* --------------------------------------------------------------
* BugId 5047993 : Getting bad read data.
*
* Usage: readmmap <filename>
*
* where:
 * filename is an absolute path to the file.
*
* Return values:
* 1 : error
* 0 : no errors
* --------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>
+#include <sys/types.h>
#include <time.h>
int
main(int argc, char **argv)
{
const char *filename = "badfile";
size_t size = 4395;
size_t idx = 0;
char *buf = NULL;
char *map = NULL;
int fd = -1, bytes, retval = 0;
uint_t seed;
if (argc < 2 || optind == argc) {
(void) fprintf(stderr,
"usage: %s <file name>\n", argv[0]);
exit(1);
}
if ((buf = calloc(1, size)) == NULL) {
perror("calloc");
exit(1);
}
filename = argv[optind];
(void) remove(filename);
fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0666);
if (fd == -1) {
perror("open to create");
retval = 1;
goto end;
}
bytes = write(fd, buf, size);
if (bytes != size) {
(void) printf("short write: %d != %zd\n", bytes, size);
retval = 1;
goto end;
}
map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (map == MAP_FAILED) {
perror("mmap");
retval = 1;
goto end;
}
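 /* Dirty one randomly chosen byte through the mapping, then sync it. */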
seed = (uint_t)time(NULL);
srandom(seed);
idx = random() % size;
map[idx] = 1;
if (msync(map, size, MS_SYNC) != 0) {
perror("msync");
retval = 1;
goto end;
}
if (munmap(map, size) != 0) {
perror("munmap");
retval = 1;
goto end;
}
bytes = pread(fd, buf, size, 0);
if (bytes != size) {
(void) printf("short read: %d != %zd\n", bytes, size);
retval = 1;
goto end;
}
if (buf[idx] != 1) {
(void) printf(
"bad data from read! got buf[%zd]=%d, expected 1\n",
idx, buf[idx]);
retval = 1;
goto end;
}
(void) printf("good data from read: buf[%zd]=1\n", idx);
end:
if (fd != -1) {
(void) close(fd);
}
if (buf != NULL) {
free(buf);
}
return (retval);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
index b3cfe149ffa7..fa545e06bbf3 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
@@ -1,232 +1,233 @@
#
# Copyright (c) 2016, 2019 by Delphix. All rights reserved.
# These variables are used by zfs-tests.sh to constrain which utilities
# may be used by the suite. The suite will create a directory which is
# the only element of $PATH and create symlinks from that dir to the
# binaries listed below.
#
# Please keep the contents of each variable sorted for ease of reading
# and maintenance.
#
export SYSTEM_FILES_COMMON='awk
basename
bc
bunzip2
bzcat
cat
chgrp
chmod
chown
cksum
cmp
cp
cpio
cut
date
dd
df
diff
dirname
dmesg
du
echo
env
expr
false
file
find
fio
getconf
getent
getfacl
grep
gunzip
gzip
head
hostname
id
iostat
kill
ksh
ldd
ln
ls
mkdir
mknod
mkfifo
mktemp
mount
mv
net
od
openssl
pamtester
pax
pgrep
ping
pkill
printf
ps
python3
readlink
rm
rmdir
rsync
scp
script
sed
seq
setfacl
sh
sleep
sort
ssh
stat
strings
sudo
swapoff
swapon
sync
tail
tar
timeout
touch
tr
true
truncate
umount
uname
uniq
vmstat
wc'
export SYSTEM_FILES_FREEBSD='chflags
compress
diskinfo
fsck
getextattr
gpart
jail
jexec
jls
lsextattr
md5
mdconfig
newfs
pw
rmextattr
setextattr
sha256
showmount
swapctl
sysctl
trim
uncompress'
export SYSTEM_FILES_LINUX='attr
blkid
blkdiscard
blockdev
chattr
exportfs
fallocate
free
getfattr
groupadd
groupdel
groupmod
hostid
losetup
lsattr
lsblk
lscpu
lsmod
lsscsi
md5sum
mkswap
modprobe
mpstat
nsenter
parted
perf
setfattr
sha256sum
udevadm
unshare
useradd
userdel
usermod
setpriv
mountpoint
flock
logger'
export ZFS_FILES='zdb
zfs
zhack
zinject
zpool
ztest
raidz_test
arc_summary
arcstat
zilstat
dbufstat
mount.zfs
zed
zgenhostid
zstream
zfs_ids_to_path
zpool_influxdb'
export ZFSTEST_FILES='badsend
btree_test
chg_usr_exec
+ clonefile
devname2devid
dir_rd_update
draid
file_fadvise
file_append
file_check
file_trunc
file_write
get_diff
getversion
largest_file
libzfs_input_check
mkbusy
mkfile
mkfiles
mktree
mmap_exec
mmap_libaio
mmap_seek
mmap_sync
mmapwrite
nvlist_to_lua
randfree_file
randwritecomp
readmmap
read_dos_attributes
renameat2
rename_dir
rm_lnkcnt_zero_file
send_doall
threadsappend
user_ns_exec
write_dos_attributes
xattrtest
stride_dd
zed_fd_spill-zedlet
suid_write_to_file
cp_files
blake3_test
edonr_test
skein_test
sha2_test
ctime
truncate_test
ereports
zfs_diff-socket
dosmode_readonly_write
idmap_util'
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index ff65dc1ac2b0..66aff5026f8f 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -1,2052 +1,2066 @@
CLEANFILES =
dist_noinst_DATA =
include $(top_srcdir)/config/Substfiles.am
datadir_zfs_tests_testsdir = $(datadir)/$(PACKAGE)/zfs-tests/tests
nobase_dist_datadir_zfs_tests_tests_DATA = \
perf/nfs-sample.cfg \
perf/perf.shlib \
\
perf/fio/mkfiles.fio \
perf/fio/random_reads.fio \
perf/fio/random_readwrite.fio \
perf/fio/random_readwrite_fixed.fio \
perf/fio/random_writes.fio \
perf/fio/sequential_reads.fio \
perf/fio/sequential_readwrite.fio \
perf/fio/sequential_writes.fio
nobase_dist_datadir_zfs_tests_tests_SCRIPTS = \
perf/regression/random_reads.ksh \
perf/regression/random_readwrite.ksh \
perf/regression/random_readwrite_fixed.ksh \
perf/regression/random_writes.ksh \
perf/regression/random_writes_zil.ksh \
perf/regression/sequential_reads_arc_cached_clone.ksh \
perf/regression/sequential_reads_arc_cached.ksh \
perf/regression/sequential_reads_dbuf_cached.ksh \
perf/regression/sequential_reads.ksh \
perf/regression/sequential_writes.ksh \
perf/regression/setup.ksh \
\
perf/scripts/prefetch_io.sh
# These lists can be regenerated by running make regen-tests at the root, or, on a *clean* source:
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
#
# simd and tmpfile are Linux-only and not installed elsewhere
#
# C programs are specced in ../Makefile.am above as part of the main Makefile
find_common := find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po'
regen:
@$(MAKE) -C $(top_builddir) clean
@$(MAKE) clean
$(SED) $(ac_inplace) '/^# -- >8 --/q' Makefile.am
echo >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_DATA = \' >> Makefile.am
$(find_common) ! -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \' >> Makefile.am
$(find_common) -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'SUBSTFILES += $$(nobase_nodist_datadir_zfs_tests_tests_DATA) $$(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)' >> Makefile.am
echo >> Makefile.am
echo 'if BUILD_LINUX' >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'endif' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_DATA += \' >> Makefile.am
$(find_common) ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
# -- >8 --
nobase_nodist_datadir_zfs_tests_tests_DATA = \
functional/pam/utilities.kshlib
nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \
functional/pyzfs/pyzfs_unittest.ksh
SUBSTFILES += $(nobase_nodist_datadir_zfs_tests_tests_DATA) $(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)
if BUILD_LINUX
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/simd/simd_supported.ksh \
functional/tmpfile/cleanup.ksh \
functional/tmpfile/setup.ksh
endif
nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/acl/acl.cfg \
functional/acl/acl_common.kshlib \
functional/alloc_class/alloc_class.cfg \
functional/alloc_class/alloc_class.kshlib \
functional/atime/atime.cfg \
functional/atime/atime_common.kshlib \
+ functional/block_cloning/block_cloning.kshlib \
functional/cache/cache.cfg \
functional/cache/cache.kshlib \
functional/cachefile/cachefile.cfg \
functional/cachefile/cachefile.kshlib \
functional/casenorm/casenorm.cfg \
functional/casenorm/casenorm.kshlib \
functional/channel_program/channel_common.kshlib \
functional/channel_program/lua_core/tst.args_to_lua.out \
functional/channel_program/lua_core/tst.args_to_lua.zcp \
functional/channel_program/lua_core/tst.divide_by_zero.err \
functional/channel_program/lua_core/tst.divide_by_zero.zcp \
functional/channel_program/lua_core/tst.exists.zcp \
functional/channel_program/lua_core/tst.large_prog.out \
functional/channel_program/lua_core/tst.large_prog.zcp \
functional/channel_program/lua_core/tst.lib_base.lua \
functional/channel_program/lua_core/tst.lib_coroutine.lua \
functional/channel_program/lua_core/tst.lib_strings.lua \
functional/channel_program/lua_core/tst.lib_table.lua \
functional/channel_program/lua_core/tst.nested_neg.zcp \
functional/channel_program/lua_core/tst.nested_pos.zcp \
functional/channel_program/lua_core/tst.recursive.zcp \
functional/channel_program/lua_core/tst.return_large.zcp \
functional/channel_program/lua_core/tst.return_recursive_table.zcp \
functional/channel_program/lua_core/tst.stack_gsub.err \
functional/channel_program/lua_core/tst.stack_gsub.zcp \
functional/channel_program/lua_core/tst.timeout.zcp \
functional/channel_program/synctask_core/tst.bookmark.copy.zcp \
functional/channel_program/synctask_core/tst.bookmark.create.zcp \
functional/channel_program/synctask_core/tst.get_index_props.out \
functional/channel_program/synctask_core/tst.get_index_props.zcp \
functional/channel_program/synctask_core/tst.get_number_props.out \
functional/channel_program/synctask_core/tst.get_number_props.zcp \
functional/channel_program/synctask_core/tst.get_string_props.out \
functional/channel_program/synctask_core/tst.get_string_props.zcp \
functional/channel_program/synctask_core/tst.promote_conflict.zcp \
functional/channel_program/synctask_core/tst.set_props.zcp \
functional/channel_program/synctask_core/tst.snapshot_destroy.zcp \
functional/channel_program/synctask_core/tst.snapshot_neg.zcp \
functional/channel_program/synctask_core/tst.snapshot_recursive.zcp \
functional/channel_program/synctask_core/tst.snapshot_rename.zcp \
functional/channel_program/synctask_core/tst.snapshot_simple.zcp \
functional/checksum/default.cfg \
functional/clean_mirror/clean_mirror_common.kshlib \
functional/clean_mirror/default.cfg \
functional/cli_root/cli_common.kshlib \
functional/cli_root/zfs_copies/zfs_copies.cfg \
functional/cli_root/zfs_copies/zfs_copies.kshlib \
functional/cli_root/zfs_create/properties.kshlib \
functional/cli_root/zfs_create/zfs_create.cfg \
functional/cli_root/zfs_create/zfs_create_common.kshlib \
functional/cli_root/zfs_destroy/zfs_destroy.cfg \
functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib \
functional/cli_root/zfs_get/zfs_get_common.kshlib \
functional/cli_root/zfs_get/zfs_get_list_d.kshlib \
functional/cli_root/zfs_jail/jail.conf \
functional/cli_root/zfs_load-key/HEXKEY \
functional/cli_root/zfs_load-key/PASSPHRASE \
functional/cli_root/zfs_load-key/RAWKEY \
functional/cli_root/zfs_load-key/zfs_load-key.cfg \
functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib \
functional/cli_root/zfs_mount/zfs_mount.cfg \
functional/cli_root/zfs_mount/zfs_mount.kshlib \
functional/cli_root/zfs_promote/zfs_promote.cfg \
functional/cli_root/zfs_receive/zstd_test_data.txt \
functional/cli_root/zfs_rename/zfs_rename.cfg \
functional/cli_root/zfs_rename/zfs_rename.kshlib \
functional/cli_root/zfs_rollback/zfs_rollback.cfg \
functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib \
functional/cli_root/zfs_send/zfs_send.cfg \
functional/cli_root/zfs_set/zfs_set_common.kshlib \
functional/cli_root/zfs_share/zfs_share.cfg \
functional/cli_root/zfs_snapshot/zfs_snapshot.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.kshlib \
functional/cli_root/zfs_upgrade/zfs_upgrade.kshlib \
functional/cli_root/zfs_wait/zfs_wait.kshlib \
functional/cli_root/zpool_add/zpool_add.cfg \
functional/cli_root/zpool_add/zpool_add.kshlib \
functional/cli_root/zpool_clear/zpool_clear.cfg \
functional/cli_root/zpool_create/draidcfg.gz \
functional/cli_root/zpool_create/zpool_create.cfg \
functional/cli_root/zpool_create/zpool_create.shlib \
functional/cli_root/zpool_destroy/zpool_destroy.cfg \
functional/cli_root/zpool_events/zpool_events.cfg \
functional/cli_root/zpool_events/zpool_events.kshlib \
functional/cli_root/zpool_expand/zpool_expand.cfg \
functional/cli_root/zpool_export/zpool_export.cfg \
functional/cli_root/zpool_export/zpool_export.kshlib \
functional/cli_root/zpool_get/vdev_get.cfg \
functional/cli_root/zpool_get/zpool_get.cfg \
functional/cli_root/zpool_get/zpool_get_parsable.cfg \
functional/cli_root/zpool_import/blockfiles/cryptv0.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/missing_ivset.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.shlib \
functional/cli_root/zpool_resilver/zpool_resilver.cfg \
functional/cli_root/zpool_scrub/zpool_scrub.cfg \
functional/cli_root/zpool_split/zpool_split.cfg \
functional/cli_root/zpool_trim/zpool_trim.kshlib \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v10.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v11.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v12.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v13.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v14.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v15.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz21.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz22.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz23.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v4.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v5.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v6.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v7.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v8.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v999.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v9.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-vBROKEN.dat.bz2 \
functional/cli_root/zpool_upgrade/zpool_upgrade.cfg \
functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib \
functional/cli_root/zpool_wait/zpool_wait.kshlib \
functional/cli_root/zhack/library.kshlib \
functional/cli_user/misc/misc.cfg \
functional/cli_user/zfs_list/zfs_list.cfg \
functional/cli_user/zfs_list/zfs_list.kshlib \
functional/compression/compress.cfg \
functional/compression/testpool_zstd.tar.gz \
functional/deadman/deadman.cfg \
functional/delegate/delegate.cfg \
functional/delegate/delegate_common.kshlib \
functional/devices/devices.cfg \
functional/devices/devices_common.kshlib \
functional/events/events.cfg \
functional/events/events_common.kshlib \
functional/fault/fault.cfg \
functional/grow/grow.cfg \
functional/history/history.cfg \
functional/history/history_common.kshlib \
functional/history/i386.migratedpool.DAT.Z \
functional/history/i386.orig_history.txt \
functional/history/sparc.migratedpool.DAT.Z \
functional/history/sparc.orig_history.txt \
functional/history/zfs-pool-v4.dat.Z \
functional/inheritance/config001.cfg \
functional/inheritance/config002.cfg \
functional/inheritance/config003.cfg \
functional/inheritance/config004.cfg \
functional/inheritance/config005.cfg \
functional/inheritance/config006.cfg \
functional/inheritance/config007.cfg \
functional/inheritance/config008.cfg \
functional/inheritance/config009.cfg \
functional/inheritance/config010.cfg \
functional/inheritance/config011.cfg \
functional/inheritance/config012.cfg \
functional/inheritance/config013.cfg \
functional/inheritance/config014.cfg \
functional/inheritance/config015.cfg \
functional/inheritance/config016.cfg \
functional/inheritance/config017.cfg \
functional/inheritance/config018.cfg \
functional/inheritance/config019.cfg \
functional/inheritance/config020.cfg \
functional/inheritance/config021.cfg \
functional/inheritance/config022.cfg \
functional/inheritance/config023.cfg \
functional/inheritance/config024.cfg \
functional/inheritance/inherit.kshlib \
functional/inheritance/README.config \
functional/inheritance/README.state \
functional/inheritance/state001.cfg \
functional/inheritance/state002.cfg \
functional/inheritance/state003.cfg \
functional/inheritance/state004.cfg \
functional/inheritance/state005.cfg \
functional/inheritance/state006.cfg \
functional/inheritance/state007.cfg \
functional/inheritance/state008.cfg \
functional/inheritance/state009.cfg \
functional/inheritance/state010.cfg \
functional/inheritance/state011.cfg \
functional/inheritance/state012.cfg \
functional/inheritance/state013.cfg \
functional/inheritance/state014.cfg \
functional/inheritance/state015.cfg \
functional/inheritance/state016.cfg \
functional/inheritance/state017.cfg \
functional/inheritance/state018.cfg \
functional/inheritance/state019.cfg \
functional/inheritance/state020.cfg \
functional/inheritance/state021.cfg \
functional/inheritance/state022.cfg \
functional/inheritance/state023.cfg \
functional/inheritance/state024.cfg \
functional/inuse/inuse.cfg \
functional/io/io.cfg \
functional/l2arc/l2arc.cfg \
functional/largest_pool/largest_pool.cfg \
functional/migration/migration.cfg \
functional/migration/migration.kshlib \
functional/mmap/mmap.cfg \
functional/mmp/mmp.cfg \
functional/mmp/mmp.kshlib \
functional/mv_files/mv_files.cfg \
functional/mv_files/mv_files_common.kshlib \
functional/nopwrite/nopwrite.shlib \
functional/no_space/enospc.cfg \
functional/online_offline/online_offline.cfg \
functional/pool_checkpoint/pool_checkpoint.kshlib \
functional/projectquota/projectquota.cfg \
functional/projectquota/projectquota_common.kshlib \
functional/quota/quota.cfg \
functional/quota/quota.kshlib \
functional/redacted_send/redacted.cfg \
functional/redacted_send/redacted.kshlib \
functional/redundancy/redundancy.cfg \
functional/redundancy/redundancy.kshlib \
functional/refreserv/refreserv.cfg \
functional/removal/removal.kshlib \
functional/replacement/replacement.cfg \
functional/reservation/reservation.cfg \
functional/reservation/reservation.shlib \
functional/rsend/dedup_encrypted_zvol.bz2 \
functional/rsend/dedup_encrypted_zvol.zsend.bz2 \
functional/rsend/dedup.zsend.bz2 \
functional/rsend/fs.tar.gz \
functional/rsend/rsend.cfg \
functional/rsend/rsend.kshlib \
functional/scrub_mirror/default.cfg \
functional/scrub_mirror/scrub_mirror_common.kshlib \
functional/slog/slog.cfg \
functional/slog/slog.kshlib \
functional/snapshot/snapshot.cfg \
functional/snapused/snapused.kshlib \
functional/sparse/sparse.cfg \
functional/trim/trim.cfg \
functional/trim/trim.kshlib \
functional/truncate/truncate.cfg \
functional/upgrade/upgrade_common.kshlib \
functional/user_namespace/user_namespace.cfg \
functional/user_namespace/user_namespace_common.kshlib \
functional/userquota/13709_reproducer.bz2 \
functional/userquota/userquota.cfg \
functional/userquota/userquota_common.kshlib \
functional/vdev_zaps/vdev_zaps.kshlib \
functional/xattr/xattr.cfg \
functional/xattr/xattr_common.kshlib \
functional/zvol/zvol.cfg \
functional/zvol/zvol_cli/zvol_cli.cfg \
functional/zvol/zvol_common.shlib \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC.cfg \
functional/zvol/zvol_misc/zvol_misc_common.kshlib \
functional/zvol/zvol_swap/zvol_swap.cfg \
functional/idmap_mount/idmap_mount.cfg \
functional/idmap_mount/idmap_mount_common.kshlib
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/acl/off/cleanup.ksh \
functional/acl/off/dosmode.ksh \
functional/acl/off/posixmode.ksh \
functional/acl/off/setup.ksh \
functional/acl/posix/cleanup.ksh \
functional/acl/posix/posix_001_pos.ksh \
functional/acl/posix/posix_002_pos.ksh \
functional/acl/posix/posix_003_pos.ksh \
functional/acl/posix/posix_004_pos.ksh \
functional/acl/posix-sa/cleanup.ksh \
functional/acl/posix-sa/posix_001_pos.ksh \
functional/acl/posix-sa/posix_002_pos.ksh \
functional/acl/posix-sa/posix_003_pos.ksh \
functional/acl/posix-sa/posix_004_pos.ksh \
functional/acl/posix-sa/setup.ksh \
functional/acl/posix/setup.ksh \
functional/alloc_class/alloc_class_001_pos.ksh \
functional/alloc_class/alloc_class_002_neg.ksh \
functional/alloc_class/alloc_class_003_pos.ksh \
functional/alloc_class/alloc_class_004_pos.ksh \
functional/alloc_class/alloc_class_005_pos.ksh \
functional/alloc_class/alloc_class_006_pos.ksh \
functional/alloc_class/alloc_class_007_pos.ksh \
functional/alloc_class/alloc_class_008_pos.ksh \
functional/alloc_class/alloc_class_009_pos.ksh \
functional/alloc_class/alloc_class_010_pos.ksh \
functional/alloc_class/alloc_class_011_neg.ksh \
functional/alloc_class/alloc_class_012_pos.ksh \
functional/alloc_class/alloc_class_013_pos.ksh \
functional/alloc_class/alloc_class_014_neg.ksh \
functional/alloc_class/alloc_class_015_pos.ksh \
functional/alloc_class/cleanup.ksh \
functional/alloc_class/setup.ksh \
functional/append/file_append.ksh \
functional/append/threadsappend_001_pos.ksh \
functional/append/cleanup.ksh \
functional/append/setup.ksh \
functional/arc/arcstats_runtime_tuning.ksh \
functional/arc/cleanup.ksh \
functional/arc/dbufstats_001_pos.ksh \
functional/arc/dbufstats_002_pos.ksh \
functional/arc/dbufstats_003_pos.ksh \
functional/arc/setup.ksh \
functional/atime/atime_001_pos.ksh \
functional/atime/atime_002_neg.ksh \
functional/atime/atime_003_pos.ksh \
functional/atime/cleanup.ksh \
functional/atime/root_atime_off.ksh \
functional/atime/root_atime_on.ksh \
functional/atime/root_relatime_on.ksh \
functional/atime/setup.ksh \
+ functional/block_cloning/cleanup.ksh \
+ functional/block_cloning/setup.ksh \
+ functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh \
+ functional/block_cloning/block_cloning_copyfilerange_fallback.ksh \
+ functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh \
+ functional/block_cloning/block_cloning_copyfilerange.ksh \
+ functional/block_cloning/block_cloning_copyfilerange_partial.ksh \
+ functional/block_cloning/block_cloning_disabled_copyfilerange.ksh \
+ functional/block_cloning/block_cloning_disabled_ficlone.ksh \
+ functional/block_cloning/block_cloning_disabled_ficlonerange.ksh \
+ functional/block_cloning/block_cloning_ficlone.ksh \
+ functional/block_cloning/block_cloning_ficlonerange.ksh \
+ functional/block_cloning/block_cloning_ficlonerange_partial.ksh \
functional/bootfs/bootfs_001_pos.ksh \
functional/bootfs/bootfs_002_neg.ksh \
functional/bootfs/bootfs_003_pos.ksh \
functional/bootfs/bootfs_004_neg.ksh \
functional/bootfs/bootfs_005_neg.ksh \
functional/bootfs/bootfs_006_pos.ksh \
functional/bootfs/bootfs_007_pos.ksh \
functional/bootfs/bootfs_008_pos.ksh \
functional/bootfs/cleanup.ksh \
functional/bootfs/setup.ksh \
functional/btree/btree_negative.ksh \
functional/btree/btree_positive.ksh \
functional/cache/cache_001_pos.ksh \
functional/cache/cache_002_pos.ksh \
functional/cache/cache_003_pos.ksh \
functional/cache/cache_004_neg.ksh \
functional/cache/cache_005_neg.ksh \
functional/cache/cache_006_pos.ksh \
functional/cache/cache_007_neg.ksh \
functional/cache/cache_008_neg.ksh \
functional/cache/cache_009_pos.ksh \
functional/cache/cache_010_pos.ksh \
functional/cache/cache_011_pos.ksh \
functional/cache/cache_012_pos.ksh \
functional/cache/cleanup.ksh \
functional/cachefile/cachefile_001_pos.ksh \
functional/cachefile/cachefile_002_pos.ksh \
functional/cachefile/cachefile_003_pos.ksh \
functional/cachefile/cachefile_004_pos.ksh \
functional/cachefile/cleanup.ksh \
functional/cachefile/setup.ksh \
functional/cache/setup.ksh \
functional/casenorm/case_all_values.ksh \
functional/casenorm/cleanup.ksh \
functional/casenorm/insensitive_formd_delete.ksh \
functional/casenorm/insensitive_formd_lookup.ksh \
functional/casenorm/insensitive_none_delete.ksh \
functional/casenorm/insensitive_none_lookup.ksh \
functional/casenorm/mixed_create_failure.ksh \
functional/casenorm/mixed_formd_delete.ksh \
functional/casenorm/mixed_formd_lookup_ci.ksh \
functional/casenorm/mixed_formd_lookup.ksh \
functional/casenorm/mixed_none_delete.ksh \
functional/casenorm/mixed_none_lookup_ci.ksh \
functional/casenorm/mixed_none_lookup.ksh \
functional/casenorm/norm_all_values.ksh \
functional/casenorm/sensitive_formd_delete.ksh \
functional/casenorm/sensitive_formd_lookup.ksh \
functional/casenorm/sensitive_none_delete.ksh \
functional/casenorm/sensitive_none_lookup.ksh \
functional/casenorm/setup.ksh \
functional/channel_program/lua_core/cleanup.ksh \
functional/channel_program/lua_core/setup.ksh \
functional/channel_program/lua_core/tst.args_to_lua.ksh \
functional/channel_program/lua_core/tst.divide_by_zero.ksh \
functional/channel_program/lua_core/tst.exists.ksh \
functional/channel_program/lua_core/tst.integer_illegal.ksh \
functional/channel_program/lua_core/tst.integer_overflow.ksh \
functional/channel_program/lua_core/tst.language_functions_neg.ksh \
functional/channel_program/lua_core/tst.language_functions_pos.ksh \
functional/channel_program/lua_core/tst.large_prog.ksh \
functional/channel_program/lua_core/tst.libraries.ksh \
functional/channel_program/lua_core/tst.memory_limit.ksh \
functional/channel_program/lua_core/tst.nested_neg.ksh \
functional/channel_program/lua_core/tst.nested_pos.ksh \
functional/channel_program/lua_core/tst.nvlist_to_lua.ksh \
functional/channel_program/lua_core/tst.recursive_neg.ksh \
functional/channel_program/lua_core/tst.recursive_pos.ksh \
functional/channel_program/lua_core/tst.return_large.ksh \
functional/channel_program/lua_core/tst.return_nvlist_neg.ksh \
functional/channel_program/lua_core/tst.return_nvlist_pos.ksh \
functional/channel_program/lua_core/tst.return_recursive_table.ksh \
functional/channel_program/lua_core/tst.stack_gsub.ksh \
functional/channel_program/lua_core/tst.timeout.ksh \
functional/channel_program/synctask_core/cleanup.ksh \
functional/channel_program/synctask_core/setup.ksh \
functional/channel_program/synctask_core/tst.bookmark.copy.ksh \
functional/channel_program/synctask_core/tst.bookmark.create.ksh \
functional/channel_program/synctask_core/tst.destroy_fs.ksh \
functional/channel_program/synctask_core/tst.destroy_snap.ksh \
functional/channel_program/synctask_core/tst.get_count_and_limit.ksh \
functional/channel_program/synctask_core/tst.get_index_props.ksh \
functional/channel_program/synctask_core/tst.get_mountpoint.ksh \
functional/channel_program/synctask_core/tst.get_neg.ksh \
functional/channel_program/synctask_core/tst.get_number_props.ksh \
functional/channel_program/synctask_core/tst.get_string_props.ksh \
functional/channel_program/synctask_core/tst.get_type.ksh \
functional/channel_program/synctask_core/tst.get_userquota.ksh \
functional/channel_program/synctask_core/tst.get_written.ksh \
functional/channel_program/synctask_core/tst.inherit.ksh \
functional/channel_program/synctask_core/tst.list_bookmarks.ksh \
functional/channel_program/synctask_core/tst.list_children.ksh \
functional/channel_program/synctask_core/tst.list_clones.ksh \
functional/channel_program/synctask_core/tst.list_holds.ksh \
functional/channel_program/synctask_core/tst.list_snapshots.ksh \
functional/channel_program/synctask_core/tst.list_system_props.ksh \
functional/channel_program/synctask_core/tst.list_user_props.ksh \
functional/channel_program/synctask_core/tst.parse_args_neg.ksh \
functional/channel_program/synctask_core/tst.promote_conflict.ksh \
functional/channel_program/synctask_core/tst.promote_multiple.ksh \
functional/channel_program/synctask_core/tst.promote_simple.ksh \
functional/channel_program/synctask_core/tst.rollback_mult.ksh \
functional/channel_program/synctask_core/tst.rollback_one.ksh \
functional/channel_program/synctask_core/tst.set_props.ksh \
functional/channel_program/synctask_core/tst.snapshot_destroy.ksh \
functional/channel_program/synctask_core/tst.snapshot_neg.ksh \
functional/channel_program/synctask_core/tst.snapshot_recursive.ksh \
functional/channel_program/synctask_core/tst.snapshot_rename.ksh \
functional/channel_program/synctask_core/tst.snapshot_simple.ksh \
functional/channel_program/synctask_core/tst.terminate_by_signal.ksh \
functional/chattr/chattr_001_pos.ksh \
functional/chattr/chattr_002_neg.ksh \
functional/chattr/cleanup.ksh \
functional/chattr/setup.ksh \
functional/checksum/cleanup.ksh \
functional/checksum/filetest_001_pos.ksh \
functional/checksum/filetest_002_pos.ksh \
functional/checksum/run_blake3_test.ksh \
functional/checksum/run_edonr_test.ksh \
functional/checksum/run_sha2_test.ksh \
functional/checksum/run_skein_test.ksh \
functional/checksum/setup.ksh \
functional/clean_mirror/clean_mirror_001_pos.ksh \
functional/clean_mirror/clean_mirror_002_pos.ksh \
functional/clean_mirror/clean_mirror_003_pos.ksh \
functional/clean_mirror/clean_mirror_004_pos.ksh \
functional/clean_mirror/cleanup.ksh \
functional/clean_mirror/setup.ksh \
functional/cli_root/zdb/zdb_002_pos.ksh \
functional/cli_root/zdb/zdb_003_pos.ksh \
functional/cli_root/zdb/zdb_004_pos.ksh \
functional/cli_root/zdb/zdb_005_pos.ksh \
functional/cli_root/zdb/zdb_006_pos.ksh \
functional/cli_root/zdb/zdb_args_neg.ksh \
functional/cli_root/zdb/zdb_args_pos.ksh \
functional/cli_root/zdb/zdb_backup.ksh \
functional/cli_root/zdb/zdb_block_size_histogram.ksh \
functional/cli_root/zdb/zdb_checksum.ksh \
functional/cli_root/zdb/zdb_decompress.ksh \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_encrypted.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
functional/cli_root/zdb/zdb_objset_id.ksh \
functional/cli_root/zdb/zdb_recover_2.ksh \
functional/cli_root/zdb/zdb_recover.ksh \
functional/cli_root/zfs_bookmark/cleanup.ksh \
functional/cli_root/zfs_bookmark/setup.ksh \
functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh \
functional/cli_root/zfs_change-key/cleanup.ksh \
functional/cli_root/zfs_change-key/setup.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_child.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_clones.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_format.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_inherit.ksh \
functional/cli_root/zfs_change-key/zfs_change-key.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_load.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_location.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_pbkdf2iters.ksh \
functional/cli_root/zfs/cleanup.ksh \
functional/cli_root/zfs_clone/cleanup.ksh \
functional/cli_root/zfs_clone/setup.ksh \
functional/cli_root/zfs_clone/zfs_clone_001_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_002_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_003_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_004_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_006_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_007_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_008_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_009_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_deeply_nested.ksh \
functional/cli_root/zfs_clone/zfs_clone_encrypted.ksh \
functional/cli_root/zfs_clone/zfs_clone_rm_nested.ksh \
functional/cli_root/zfs_copies/cleanup.ksh \
functional/cli_root/zfs_copies/setup.ksh \
functional/cli_root/zfs_copies/zfs_copies_001_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_003_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_004_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_006_pos.ksh \
functional/cli_root/zfs_create/cleanup.ksh \
functional/cli_root/zfs_create/setup.ksh \
functional/cli_root/zfs_create/zfs_create_001_pos.ksh \
functional/cli_root/zfs_create/zfs_create_002_pos.ksh \
functional/cli_root/zfs_create/zfs_create_003_pos.ksh \
functional/cli_root/zfs_create/zfs_create_004_pos.ksh \
functional/cli_root/zfs_create/zfs_create_005_pos.ksh \
functional/cli_root/zfs_create/zfs_create_006_pos.ksh \
functional/cli_root/zfs_create/zfs_create_007_pos.ksh \
functional/cli_root/zfs_create/zfs_create_008_neg.ksh \
functional/cli_root/zfs_create/zfs_create_009_neg.ksh \
functional/cli_root/zfs_create/zfs_create_010_neg.ksh \
functional/cli_root/zfs_create/zfs_create_011_pos.ksh \
functional/cli_root/zfs_create/zfs_create_012_pos.ksh \
functional/cli_root/zfs_create/zfs_create_013_pos.ksh \
functional/cli_root/zfs_create/zfs_create_014_pos.ksh \
functional/cli_root/zfs_create/zfs_create_crypt_combos.ksh \
functional/cli_root/zfs_create/zfs_create_dryrun.ksh \
functional/cli_root/zfs_create/zfs_create_encrypted.ksh \
functional/cli_root/zfs_create/zfs_create_nomount.ksh \
functional/cli_root/zfs_create/zfs_create_verbose.ksh \
functional/cli_root/zfs_destroy/cleanup.ksh \
functional/cli_root/zfs_destroy/setup.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_dedup.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_002_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_003_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_006_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_007_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_008_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_009_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_010_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_011_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_012_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_013_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal.ksh \
functional/cli_root/zfs_diff/cleanup.ksh \
functional/cli_root/zfs_diff/setup.ksh \
functional/cli_root/zfs_diff/zfs_diff_changes.ksh \
functional/cli_root/zfs_diff/zfs_diff_cliargs.ksh \
functional/cli_root/zfs_diff/zfs_diff_encrypted.ksh \
functional/cli_root/zfs_diff/zfs_diff_mangle.ksh \
functional/cli_root/zfs_diff/zfs_diff_timestamp.ksh \
functional/cli_root/zfs_diff/zfs_diff_types.ksh \
functional/cli_root/zfs_get/cleanup.ksh \
functional/cli_root/zfs_get/setup.ksh \
functional/cli_root/zfs_get/zfs_get_001_pos.ksh \
functional/cli_root/zfs_get/zfs_get_002_pos.ksh \
functional/cli_root/zfs_get/zfs_get_003_pos.ksh \
functional/cli_root/zfs_get/zfs_get_004_pos.ksh \
functional/cli_root/zfs_get/zfs_get_005_neg.ksh \
functional/cli_root/zfs_get/zfs_get_006_neg.ksh \
functional/cli_root/zfs_get/zfs_get_007_neg.ksh \
functional/cli_root/zfs_get/zfs_get_008_pos.ksh \
functional/cli_root/zfs_get/zfs_get_009_pos.ksh \
functional/cli_root/zfs_get/zfs_get_010_neg.ksh \
functional/cli_root/zfs_ids_to_path/cleanup.ksh \
functional/cli_root/zfs_ids_to_path/setup.ksh \
functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh \
functional/cli_root/zfs_inherit/cleanup.ksh \
functional/cli_root/zfs_inherit/setup.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_001_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_mountpoint.ksh \
functional/cli_root/zfs_jail/cleanup.ksh \
functional/cli_root/zfs_jail/setup.ksh \
functional/cli_root/zfs_jail/zfs_jail_001_pos.ksh \
functional/cli_root/zfs_load-key/cleanup.ksh \
functional/cli_root/zfs_load-key/setup.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_all.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_file.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_https.ksh \
functional/cli_root/zfs_load-key/zfs_load-key.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_location.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_noop.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh \
functional/cli_root/zfs_mount/cleanup.ksh \
functional/cli_root/zfs_mount/setup.ksh \
functional/cli_root/zfs_mount/zfs_mount_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_002_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_003_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_004_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_005_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_006_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_009_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_011_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_012_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_013_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_014_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_mountpoints.ksh \
functional/cli_root/zfs_mount/zfs_mount_encrypted.ksh \
functional/cli_root/zfs_mount/zfs_mount_remount.ksh \
functional/cli_root/zfs_mount/zfs_mount_test_race.ksh \
functional/cli_root/zfs_mount/zfs_multi_mount.ksh \
functional/cli_root/zfs_program/cleanup.ksh \
functional/cli_root/zfs_program/setup.ksh \
functional/cli_root/zfs_program/zfs_program_json.ksh \
functional/cli_root/zfs_promote/cleanup.ksh \
functional/cli_root/zfs_promote/setup.ksh \
functional/cli_root/zfs_promote/zfs_promote_001_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_002_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_003_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_004_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_005_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_007_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_008_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_encryptionroot.ksh \
functional/cli_root/zfs_property/cleanup.ksh \
functional/cli_root/zfs_property/setup.ksh \
functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh \
functional/cli_root/zfs_receive/cleanup.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_aliases.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_override.ksh \
functional/cli_root/zfs_receive/setup.ksh \
functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_002_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_003_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_005_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_006_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_007_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_008_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_009_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_012_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_013_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_014_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_015_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_016_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_-e.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_zstd.ksh \
functional/cli_root/zfs_receive/zfs_receive_new_props.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_-d.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_incremental.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw.ksh \
functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh \
functional/cli_root/zfs_receive/zfs_receive_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_compressed_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_large_block_corrective.ksh \
functional/cli_root/zfs_rename/cleanup.ksh \
functional/cli_root/zfs_rename/setup.ksh \
functional/cli_root/zfs_rename/zfs_rename_001_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_002_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_003_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_008_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_010_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_011_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_012_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_013_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_encrypted_child.ksh \
functional/cli_root/zfs_rename/zfs_rename_mountpoint.ksh \
functional/cli_root/zfs_rename/zfs_rename_nounmount.ksh \
functional/cli_root/zfs_rename/zfs_rename_to_encrypted.ksh \
functional/cli_root/zfs_reservation/cleanup.ksh \
functional/cli_root/zfs_reservation/setup.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_001_pos.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_002_pos.ksh \
functional/cli_root/zfs_rollback/cleanup.ksh \
functional/cli_root/zfs_rollback/setup.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_002_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_003_neg.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_004_neg.ksh \
functional/cli_root/zfs_send/cleanup.ksh \
functional/cli_root/zfs_send/setup.ksh \
functional/cli_root/zfs_send/zfs_send_001_pos.ksh \
functional/cli_root/zfs_send/zfs_send_002_pos.ksh \
functional/cli_root/zfs_send/zfs_send_003_pos.ksh \
functional/cli_root/zfs_send/zfs_send_004_neg.ksh \
functional/cli_root/zfs_send/zfs_send_005_pos.ksh \
functional/cli_root/zfs_send/zfs_send_006_pos.ksh \
functional/cli_root/zfs_send/zfs_send_007_pos.ksh \
functional/cli_root/zfs_send/zfs_send-b.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted_unloaded.ksh \
functional/cli_root/zfs_send/zfs_send_raw.ksh \
functional/cli_root/zfs_send/zfs_send_skip_missing.ksh \
functional/cli_root/zfs_send/zfs_send_sparse.ksh \
functional/cli_root/zfs_set/cache_001_pos.ksh \
functional/cli_root/zfs_set/cache_002_neg.ksh \
functional/cli_root/zfs_set/canmount_001_pos.ksh \
functional/cli_root/zfs_set/canmount_002_pos.ksh \
functional/cli_root/zfs_set/canmount_003_pos.ksh \
functional/cli_root/zfs_set/canmount_004_pos.ksh \
functional/cli_root/zfs_set/checksum_001_pos.ksh \
functional/cli_root/zfs_set/cleanup.ksh \
functional/cli_root/zfs_set/compression_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_002_pos.ksh \
functional/cli_root/zfs_set/mountpoint_003_pos.ksh \
functional/cli_root/zfs_set/onoffs_001_pos.ksh \
functional/cli_root/zfs_set/property_alias_001_pos.ksh \
functional/cli_root/zfs_set/readonly_001_pos.ksh \
functional/cli_root/zfs_set/reservation_001_neg.ksh \
functional/cli_root/zfs_set/ro_props_001_pos.ksh \
functional/cli_root/zfs_set/setup.ksh \
functional/cli_root/zfs_set/share_mount_001_neg.ksh \
functional/cli_root/zfs_set/snapdir_001_pos.ksh \
functional/cli_root/zfs/setup.ksh \
functional/cli_root/zfs_set/user_property_001_pos.ksh \
functional/cli_root/zfs_set/user_property_002_pos.ksh \
functional/cli_root/zfs_set/user_property_003_neg.ksh \
functional/cli_root/zfs_set/user_property_004_pos.ksh \
functional/cli_root/zfs_set/version_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_002_neg.ksh \
functional/cli_root/zfs_set/zfs_set_003_neg.ksh \
functional/cli_root/zfs_set/zfs_set_feature_activation.ksh \
functional/cli_root/zfs_set/zfs_set_keylocation.ksh \
functional/cli_root/zfs_share/cleanup.ksh \
functional/cli_root/zfs_share/setup.ksh \
functional/cli_root/zfs_share/zfs_share_001_pos.ksh \
functional/cli_root/zfs_share/zfs_share_002_pos.ksh \
functional/cli_root/zfs_share/zfs_share_003_pos.ksh \
functional/cli_root/zfs_share/zfs_share_004_pos.ksh \
functional/cli_root/zfs_share/zfs_share_005_pos.ksh \
functional/cli_root/zfs_share/zfs_share_006_pos.ksh \
functional/cli_root/zfs_share/zfs_share_007_neg.ksh \
functional/cli_root/zfs_share/zfs_share_008_neg.ksh \
functional/cli_root/zfs_share/zfs_share_009_neg.ksh \
functional/cli_root/zfs_share/zfs_share_010_neg.ksh \
functional/cli_root/zfs_share/zfs_share_011_pos.ksh \
functional/cli_root/zfs_share/zfs_share_012_pos.ksh \
functional/cli_root/zfs_share/zfs_share_013_pos.ksh \
functional/cli_root/zfs_share/zfs_share_concurrent_shares.ksh \
functional/cli_root/zfs_snapshot/cleanup.ksh \
functional/cli_root/zfs_snapshot/setup.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_001_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_003_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_004_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_005_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_006_pos.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_007_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh \
functional/cli_root/zfs_sysfs/cleanup.ksh \
functional/cli_root/zfs_sysfs/setup.ksh \
functional/cli_root/zfs_sysfs/zfeature_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_sysfs_live.ksh \
functional/cli_root/zfs_sysfs/zpool_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zpool_set_unsupported.ksh \
functional/cli_root/zfs_unload-key/cleanup.ksh \
functional/cli_root/zfs_unload-key/setup.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_all.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_recursive.ksh \
functional/cli_root/zfs_unmount/cleanup.ksh \
functional/cli_root/zfs_unmount/setup.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_002_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_003_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_004_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_006_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_007_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_008_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_nested.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_unload_keys.ksh \
functional/cli_root/zfs_unshare/cleanup.ksh \
functional/cli_root/zfs_unshare/setup.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_003_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_005_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_006_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_007_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_008_pos.ksh \
functional/cli_root/zfs_upgrade/cleanup.ksh \
functional/cli_root/zfs_upgrade/setup.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_002_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_003_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_004_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_005_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_006_neg.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_007_neg.ksh \
functional/cli_root/zfs_wait/cleanup.ksh \
functional/cli_root/zfs_wait/setup.ksh \
functional/cli_root/zfs_wait/zfs_wait_deleteq.ksh \
functional/cli_root/zfs_wait/zfs_wait_getsubopt.ksh \
functional/cli_root/zfs/zfs_001_neg.ksh \
functional/cli_root/zfs/zfs_002_pos.ksh \
functional/cli_root/zfs/zfs_003_neg.ksh \
functional/cli_root/zhack/zhack_label_repair_001.ksh \
functional/cli_root/zhack/zhack_label_repair_002.ksh \
functional/cli_root/zhack/zhack_label_repair_003.ksh \
functional/cli_root/zhack/zhack_label_repair_004.ksh \
functional/cli_root/zpool_add/add_nested_replacing_spare.ksh \
functional/cli_root/zpool_add/add-o_ashift.ksh \
functional/cli_root/zpool_add/add_prop_ashift.ksh \
functional/cli_root/zpool_add/cleanup.ksh \
functional/cli_root/zpool_add/setup.ksh \
functional/cli_root/zpool_add/zpool_add_001_pos.ksh \
functional/cli_root/zpool_add/zpool_add_002_pos.ksh \
functional/cli_root/zpool_add/zpool_add_003_pos.ksh \
functional/cli_root/zpool_add/zpool_add_004_pos.ksh \
functional/cli_root/zpool_add/zpool_add_005_pos.ksh \
functional/cli_root/zpool_add/zpool_add_006_pos.ksh \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
functional/cli_root/zpool_attach/setup.ksh \
functional/cli_root/zpool_attach/zpool_attach_001_neg.ksh \
functional/cli_root/zpool/cleanup.ksh \
functional/cli_root/zpool_clear/cleanup.ksh \
functional/cli_root/zpool_clear/setup.ksh \
functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh \
functional/cli_root/zpool_clear/zpool_clear_002_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_003_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_readonly.ksh \
functional/cli_root/zpool_create/cleanup.ksh \
functional/cli_root/zpool_create/create-o_ashift.ksh \
functional/cli_root/zpool_create/setup.ksh \
functional/cli_root/zpool_create/zpool_create_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_007_neg.ksh \
functional/cli_root/zpool_create/zpool_create_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_009_neg.ksh \
functional/cli_root/zpool_create/zpool_create_010_neg.ksh \
functional/cli_root/zpool_create/zpool_create_011_neg.ksh \
functional/cli_root/zpool_create/zpool_create_012_neg.ksh \
functional/cli_root/zpool_create/zpool_create_014_neg.ksh \
functional/cli_root/zpool_create/zpool_create_015_neg.ksh \
functional/cli_root/zpool_create/zpool_create_016_pos.ksh \
functional/cli_root/zpool_create/zpool_create_017_neg.ksh \
functional/cli_root/zpool_create/zpool_create_018_pos.ksh \
functional/cli_root/zpool_create/zpool_create_019_pos.ksh \
functional/cli_root/zpool_create/zpool_create_020_pos.ksh \
functional/cli_root/zpool_create/zpool_create_021_pos.ksh \
functional/cli_root/zpool_create/zpool_create_022_pos.ksh \
functional/cli_root/zpool_create/zpool_create_023_neg.ksh \
functional/cli_root/zpool_create/zpool_create_024_pos.ksh \
functional/cli_root/zpool_create/zpool_create_crypt_combos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_dryrun_output.ksh \
functional/cli_root/zpool_create/zpool_create_encrypted.ksh \
functional/cli_root/zpool_create/zpool_create_features_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_004_neg.ksh \
functional/cli_root/zpool_create/zpool_create_features_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_007_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh \
functional/cli_root/zpool_create/zpool_create_tempname.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_002_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_003_neg.ksh \
functional/cli_root/zpool_detach/cleanup.ksh \
functional/cli_root/zpool_detach/setup.ksh \
functional/cli_root/zpool_detach/zpool_detach_001_neg.ksh \
functional/cli_root/zpool_events/cleanup.ksh \
functional/cli_root/zpool_events/setup.ksh \
functional/cli_root/zpool_events/zpool_events_clear.ksh \
functional/cli_root/zpool_events/zpool_events_clear_retained.ksh \
functional/cli_root/zpool_events/zpool_events_cliargs.ksh \
functional/cli_root/zpool_events/zpool_events_duplicates.ksh \
functional/cli_root/zpool_events/zpool_events_errors.ksh \
functional/cli_root/zpool_events/zpool_events_follow.ksh \
functional/cli_root/zpool_events/zpool_events_poolname.ksh \
functional/cli_root/zpool_expand/cleanup.ksh \
functional/cli_root/zpool_expand/setup.ksh \
functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh \
functional/cli_root/zpool_expand/zpool_expand_004_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_005_pos.ksh \
functional/cli_root/zpool_export/cleanup.ksh \
functional/cli_root/zpool_export/setup.ksh \
functional/cli_root/zpool_export/zpool_export_001_pos.ksh \
functional/cli_root/zpool_export/zpool_export_002_pos.ksh \
functional/cli_root/zpool_export/zpool_export_003_neg.ksh \
functional/cli_root/zpool_export/zpool_export_004_pos.ksh \
functional/cli_root/zpool_get/cleanup.ksh \
functional/cli_root/zpool_get/setup.ksh \
functional/cli_root/zpool_get/vdev_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_002_pos.ksh \
functional/cli_root/zpool_get/zpool_get_003_pos.ksh \
functional/cli_root/zpool_get/zpool_get_004_neg.ksh \
functional/cli_root/zpool_get/zpool_get_005_pos.ksh \
functional/cli_root/zpool_history/cleanup.ksh \
functional/cli_root/zpool_history/setup.ksh \
functional/cli_root/zpool_history/zpool_history_001_neg.ksh \
functional/cli_root/zpool_history/zpool_history_002_pos.ksh \
functional/cli_root/zpool_import/cleanup.ksh \
functional/cli_root/zpool_import/import_cachefile_device_added.ksh \
functional/cli_root/zpool_import/import_cachefile_device_removed.ksh \
functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_attached.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_detached.ksh \
functional/cli_root/zpool_import/import_cachefile_paths_changed.ksh \
functional/cli_root/zpool_import/import_cachefile_shared_device.ksh \
functional/cli_root/zpool_import/import_devices_missing.ksh \
functional/cli_root/zpool_import/import_log_missing.ksh \
functional/cli_root/zpool_import/import_paths_changed.ksh \
functional/cli_root/zpool_import/import_rewind_config_changed.ksh \
functional/cli_root/zpool_import/import_rewind_device_replaced.ksh \
functional/cli_root/zpool_import/setup.ksh \
functional/cli_root/zpool_import/zpool_import_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_004_pos.ksh \
functional/cli_root/zpool_import/zpool_import_005_pos.ksh \
functional/cli_root/zpool_import/zpool_import_006_pos.ksh \
functional/cli_root/zpool_import/zpool_import_007_pos.ksh \
functional/cli_root/zpool_import/zpool_import_008_pos.ksh \
functional/cli_root/zpool_import/zpool_import_009_neg.ksh \
functional/cli_root/zpool_import/zpool_import_010_pos.ksh \
functional/cli_root/zpool_import/zpool_import_011_neg.ksh \
functional/cli_root/zpool_import/zpool_import_012_pos.ksh \
functional/cli_root/zpool_import/zpool_import_013_neg.ksh \
functional/cli_root/zpool_import/zpool_import_014_pos.ksh \
functional/cli_root/zpool_import/zpool_import_015_pos.ksh \
functional/cli_root/zpool_import/zpool_import_016_pos.ksh \
functional/cli_root/zpool_import/zpool_import_017_pos.ksh \
functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted_load.ksh \
functional/cli_root/zpool_import/zpool_import_errata3.ksh \
functional/cli_root/zpool_import/zpool_import_errata4.ksh \
functional/cli_root/zpool_import/zpool_import_features_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_features_002_neg.ksh \
functional/cli_root/zpool_import/zpool_import_features_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_import_export.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_offline_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_online_offline.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_split.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_neg.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_pos.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_suspend_resume.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_uninit.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_unsupported_vdevs.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_checksums.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_removed.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_valid.ksh \
functional/cli_root/zpool_offline/cleanup.ksh \
functional/cli_root/zpool_offline/setup.ksh \
functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh \
functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh \
functional/cli_root/zpool_offline/zpool_offline_003_pos.ksh \
functional/cli_root/zpool_online/cleanup.ksh \
functional/cli_root/zpool_online/setup.ksh \
functional/cli_root/zpool_online/zpool_online_001_pos.ksh \
functional/cli_root/zpool_online/zpool_online_002_neg.ksh \
functional/cli_root/zpool_remove/cleanup.ksh \
functional/cli_root/zpool_remove/setup.ksh \
functional/cli_root/zpool_remove/zpool_remove_001_neg.ksh \
functional/cli_root/zpool_remove/zpool_remove_002_pos.ksh \
functional/cli_root/zpool_remove/zpool_remove_003_pos.ksh \
functional/cli_root/zpool_reopen/cleanup.ksh \
functional/cli_root/zpool_reopen/setup.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh \
functional/cli_root/zpool_replace/cleanup.ksh \
functional/cli_root/zpool_replace/replace-o_ashift.ksh \
functional/cli_root/zpool_replace/replace_prop_ashift.ksh \
functional/cli_root/zpool_replace/setup.ksh \
functional/cli_root/zpool_replace/zpool_replace_001_neg.ksh \
functional/cli_root/zpool_resilver/cleanup.ksh \
functional/cli_root/zpool_resilver/setup.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh \
functional/cli_root/zpool_scrub/cleanup.ksh \
functional/cli_root/zpool_scrub/setup.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_encrypted_unloaded.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_001_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_004_pos.ksh \
functional/cli_root/zpool_set/cleanup.ksh \
functional/cli_root/zpool_set/setup.ksh \
functional/cli_root/zpool/setup.ksh \
functional/cli_root/zpool_set/vdev_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_common.kshlib \
functional/cli_root/zpool_set/zpool_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_003_neg.ksh \
functional/cli_root/zpool_set/zpool_set_ashift.ksh \
functional/cli_root/zpool_set/user_property_001_pos.ksh \
functional/cli_root/zpool_set/user_property_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_features.ksh \
functional/cli_root/zpool_split/cleanup.ksh \
functional/cli_root/zpool_split/setup.ksh \
functional/cli_root/zpool_split/zpool_split_cliargs.ksh \
functional/cli_root/zpool_split/zpool_split_devices.ksh \
functional/cli_root/zpool_split/zpool_split_dryrun_output.ksh \
functional/cli_root/zpool_split/zpool_split_encryption.ksh \
functional/cli_root/zpool_split/zpool_split_indirect.ksh \
functional/cli_root/zpool_split/zpool_split_props.ksh \
functional/cli_root/zpool_split/zpool_split_resilver.ksh \
functional/cli_root/zpool_split/zpool_split_vdevs.ksh \
functional/cli_root/zpool_split/zpool_split_wholedisk.ksh \
functional/cli_root/zpool_status/cleanup.ksh \
functional/cli_root/zpool_status/setup.ksh \
functional/cli_root/zpool_status/zpool_status_001_pos.ksh \
functional/cli_root/zpool_status/zpool_status_002_pos.ksh \
functional/cli_root/zpool_status/zpool_status_003_pos.ksh \
functional/cli_root/zpool_status/zpool_status_004_pos.ksh \
functional/cli_root/zpool_status/zpool_status_005_pos.ksh \
functional/cli_root/zpool_status/zpool_status_006_pos.ksh \
functional/cli_root/zpool_status/zpool_status_007_pos.ksh \
functional/cli_root/zpool_status/zpool_status_features_001_pos.ksh \
functional/cli_root/zpool_sync/cleanup.ksh \
functional/cli_root/zpool_sync/setup.ksh \
functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh \
functional/cli_root/zpool_sync/zpool_sync_002_neg.ksh \
functional/cli_root/zpool_trim/cleanup.ksh \
functional/cli_root/zpool_trim/setup.ksh \
functional/cli_root/zpool_trim/zpool_trim_attach_detach_add_remove.ksh \
functional/cli_root/zpool_trim/zpool_trim_fault_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_import_export.ksh \
functional/cli_root/zpool_trim/zpool_trim_multiple.ksh \
functional/cli_root/zpool_trim/zpool_trim_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_offline_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh \
functional/cli_root/zpool_trim/zpool_trim_partial.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_secure.ksh \
functional/cli_root/zpool_trim/zpool_trim_split.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_pos.ksh \
functional/cli_root/zpool_trim/zpool_trim_suspend_resume.ksh \
functional/cli_root/zpool_trim/zpool_trim_unsupported_vdevs.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_checksums.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh \
functional/cli_root/zpool_upgrade/cleanup.ksh \
functional/cli_root/zpool_upgrade/setup.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_001_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_002_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_003_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_005_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_006_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_008_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_009_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_features_001_pos.ksh \
functional/cli_root/zpool_wait/cleanup.ksh \
functional/cli_root/zpool_wait/scan/cleanup.ksh \
functional/cli_root/zpool_wait/scan/setup.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_rebuild.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_resilver.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_basic.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh \
functional/cli_root/zpool_wait/setup.ksh \
functional/cli_root/zpool_wait/zpool_wait_discard.ksh \
functional/cli_root/zpool_wait/zpool_wait_freeing.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_multiple.ksh \
functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_usage.ksh \
functional/cli_root/zpool/zpool_001_neg.ksh \
functional/cli_root/zpool/zpool_002_pos.ksh \
functional/cli_root/zpool/zpool_003_pos.ksh \
functional/cli_root/zpool/zpool_colors.ksh \
functional/cli_user/misc/arcstat_001_pos.ksh \
functional/cli_user/misc/arc_summary_001_pos.ksh \
functional/cli_user/misc/arc_summary_002_neg.ksh \
functional/cli_user/misc/zilstat_001_pos.ksh \
functional/cli_user/misc/cleanup.ksh \
functional/cli_user/misc/setup.ksh \
functional/cli_user/misc/zdb_001_neg.ksh \
functional/cli_user/misc/zfs_001_neg.ksh \
functional/cli_user/misc/zfs_allow_001_neg.ksh \
functional/cli_user/misc/zfs_clone_001_neg.ksh \
functional/cli_user/misc/zfs_create_001_neg.ksh \
functional/cli_user/misc/zfs_destroy_001_neg.ksh \
functional/cli_user/misc/zfs_get_001_neg.ksh \
functional/cli_user/misc/zfs_inherit_001_neg.ksh \
functional/cli_user/misc/zfs_mount_001_neg.ksh \
functional/cli_user/misc/zfs_promote_001_neg.ksh \
functional/cli_user/misc/zfs_receive_001_neg.ksh \
functional/cli_user/misc/zfs_rename_001_neg.ksh \
functional/cli_user/misc/zfs_rollback_001_neg.ksh \
functional/cli_user/misc/zfs_send_001_neg.ksh \
functional/cli_user/misc/zfs_set_001_neg.ksh \
functional/cli_user/misc/zfs_share_001_neg.ksh \
functional/cli_user/misc/zfs_snapshot_001_neg.ksh \
functional/cli_user/misc/zfs_unallow_001_neg.ksh \
functional/cli_user/misc/zfs_unmount_001_neg.ksh \
functional/cli_user/misc/zfs_unshare_001_neg.ksh \
functional/cli_user/misc/zfs_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_001_neg.ksh \
functional/cli_user/misc/zpool_add_001_neg.ksh \
functional/cli_user/misc/zpool_attach_001_neg.ksh \
functional/cli_user/misc/zpool_clear_001_neg.ksh \
functional/cli_user/misc/zpool_create_001_neg.ksh \
functional/cli_user/misc/zpool_destroy_001_neg.ksh \
functional/cli_user/misc/zpool_detach_001_neg.ksh \
functional/cli_user/misc/zpool_export_001_neg.ksh \
functional/cli_user/misc/zpool_get_001_neg.ksh \
functional/cli_user/misc/zpool_history_001_neg.ksh \
functional/cli_user/misc/zpool_import_001_neg.ksh \
functional/cli_user/misc/zpool_import_002_neg.ksh \
functional/cli_user/misc/zpool_offline_001_neg.ksh \
functional/cli_user/misc/zpool_online_001_neg.ksh \
functional/cli_user/misc/zpool_remove_001_neg.ksh \
functional/cli_user/misc/zpool_replace_001_neg.ksh \
functional/cli_user/misc/zpool_scrub_001_neg.ksh \
functional/cli_user/misc/zpool_set_001_neg.ksh \
functional/cli_user/misc/zpool_status_001_neg.ksh \
functional/cli_user/misc/zpool_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_wait_privilege.ksh \
functional/cli_user/zfs_list/cleanup.ksh \
functional/cli_user/zfs_list/setup.ksh \
functional/cli_user/zfs_list/zfs_list_001_pos.ksh \
functional/cli_user/zfs_list/zfs_list_002_pos.ksh \
functional/cli_user/zfs_list/zfs_list_003_pos.ksh \
functional/cli_user/zfs_list/zfs_list_004_neg.ksh \
functional/cli_user/zfs_list/zfs_list_005_neg.ksh \
functional/cli_user/zfs_list/zfs_list_007_pos.ksh \
functional/cli_user/zfs_list/zfs_list_008_neg.ksh \
functional/cli_user/zpool_iostat/cleanup.ksh \
functional/cli_user/zpool_iostat/setup.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_002_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_003_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_004_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_disable.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh \
functional/cli_user/zpool_list/cleanup.ksh \
functional/cli_user/zpool_list/setup.ksh \
functional/cli_user/zpool_list/zpool_list_001_pos.ksh \
functional/cli_user/zpool_list/zpool_list_002_neg.ksh \
functional/cli_user/zpool_status/cleanup.ksh \
functional/cli_user/zpool_status/setup.ksh \
functional/cli_user/zpool_status/zpool_status_003_pos.ksh \
functional/cli_user/zpool_status/zpool_status_-c_disable.ksh \
functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh \
functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh \
functional/compression/cleanup.ksh \
functional/compression/compress_001_pos.ksh \
functional/compression/compress_002_pos.ksh \
functional/compression/compress_003_pos.ksh \
functional/compression/compress_004_pos.ksh \
functional/compression/compress_zstd_bswap.ksh \
functional/compression/l2arc_compressed_arc_disabled.ksh \
functional/compression/l2arc_compressed_arc.ksh \
functional/compression/l2arc_encrypted.ksh \
functional/compression/l2arc_encrypted_no_compressed_arc.ksh \
functional/compression/setup.ksh \
functional/cp_files/cleanup.ksh \
functional/cp_files/cp_files_001_pos.ksh \
functional/cp_files/setup.ksh \
functional/crtime/cleanup.ksh \
functional/crtime/crtime_001_pos.ksh \
functional/crtime/setup.ksh \
functional/ctime/cleanup.ksh \
functional/ctime/ctime_001_pos.ksh \
functional/ctime/setup.ksh \
functional/deadman/deadman_ratelimit.ksh \
functional/deadman/deadman_sync.ksh \
functional/deadman/deadman_zio.ksh \
functional/delegate/cleanup.ksh \
functional/delegate/setup.ksh \
functional/delegate/zfs_allow_001_pos.ksh \
functional/delegate/zfs_allow_002_pos.ksh \
functional/delegate/zfs_allow_003_pos.ksh \
functional/delegate/zfs_allow_004_pos.ksh \
functional/delegate/zfs_allow_005_pos.ksh \
functional/delegate/zfs_allow_006_pos.ksh \
functional/delegate/zfs_allow_007_pos.ksh \
functional/delegate/zfs_allow_008_pos.ksh \
functional/delegate/zfs_allow_009_neg.ksh \
functional/delegate/zfs_allow_010_pos.ksh \
functional/delegate/zfs_allow_011_neg.ksh \
functional/delegate/zfs_allow_012_neg.ksh \
functional/delegate/zfs_unallow_001_pos.ksh \
functional/delegate/zfs_unallow_002_pos.ksh \
functional/delegate/zfs_unallow_003_pos.ksh \
functional/delegate/zfs_unallow_004_pos.ksh \
functional/delegate/zfs_unallow_005_pos.ksh \
functional/delegate/zfs_unallow_006_pos.ksh \
functional/delegate/zfs_unallow_007_neg.ksh \
functional/delegate/zfs_unallow_008_neg.ksh \
functional/devices/cleanup.ksh \
functional/devices/devices_001_pos.ksh \
functional/devices/devices_002_neg.ksh \
functional/devices/devices_003_pos.ksh \
functional/devices/setup.ksh \
functional/dos_attributes/cleanup.ksh \
functional/dos_attributes/read_dos_attrs_001.ksh \
functional/dos_attributes/setup.ksh \
functional/dos_attributes/write_dos_attrs_001.ksh \
functional/events/cleanup.ksh \
functional/events/events_001_pos.ksh \
functional/events/events_002_pos.ksh \
functional/events/setup.ksh \
functional/events/zed_cksum_config.ksh \
functional/events/zed_cksum_reported.ksh \
functional/events/zed_fd_spill.ksh \
functional/events/zed_io_config.ksh \
functional/events/zed_rc_filter.ksh \
functional/exec/cleanup.ksh \
functional/exec/exec_001_pos.ksh \
functional/exec/exec_002_neg.ksh \
functional/exec/setup.ksh \
functional/fadvise/cleanup.ksh \
functional/fadvise/fadvise_sequential.ksh \
functional/fadvise/setup.ksh \
functional/fallocate/cleanup.ksh \
functional/fallocate/fallocate_prealloc.ksh \
functional/fallocate/fallocate_punch-hole.ksh \
functional/fallocate/fallocate_zero-range.ksh \
functional/fallocate/setup.ksh \
functional/fault/auto_offline_001_pos.ksh \
functional/fault/auto_online_001_pos.ksh \
functional/fault/auto_online_002_pos.ksh \
functional/fault/auto_replace_001_pos.ksh \
functional/fault/auto_spare_001_pos.ksh \
functional/fault/auto_spare_002_pos.ksh \
functional/fault/auto_spare_ashift.ksh \
functional/fault/auto_spare_multiple.ksh \
functional/fault/auto_spare_shared.ksh \
functional/fault/cleanup.ksh \
functional/fault/decompress_fault.ksh \
functional/fault/decrypt_fault.ksh \
functional/fault/scrub_after_resilver.ksh \
functional/fault/setup.ksh \
functional/fault/zpool_status_-s.ksh \
functional/features/async_destroy/async_destroy_001_pos.ksh \
functional/features/async_destroy/cleanup.ksh \
functional/features/async_destroy/setup.ksh \
functional/features/large_dnode/cleanup.ksh \
functional/features/large_dnode/large_dnode_001_pos.ksh \
functional/features/large_dnode/large_dnode_002_pos.ksh \
functional/features/large_dnode/large_dnode_003_pos.ksh \
functional/features/large_dnode/large_dnode_004_neg.ksh \
functional/features/large_dnode/large_dnode_005_pos.ksh \
functional/features/large_dnode/large_dnode_006_pos.ksh \
functional/features/large_dnode/large_dnode_007_neg.ksh \
functional/features/large_dnode/large_dnode_008_pos.ksh \
functional/features/large_dnode/large_dnode_009_pos.ksh \
functional/features/large_dnode/setup.ksh \
functional/grow/grow_pool_001_pos.ksh \
functional/grow/grow_replicas_001_pos.ksh \
functional/history/cleanup.ksh \
functional/history/history_001_pos.ksh \
functional/history/history_002_pos.ksh \
functional/history/history_003_pos.ksh \
functional/history/history_004_pos.ksh \
functional/history/history_005_neg.ksh \
functional/history/history_006_neg.ksh \
functional/history/history_007_pos.ksh \
functional/history/history_008_pos.ksh \
functional/history/history_009_pos.ksh \
functional/history/history_010_pos.ksh \
functional/history/setup.ksh \
functional/inheritance/cleanup.ksh \
functional/inheritance/inherit_001_pos.ksh \
functional/inuse/inuse_001_pos.ksh \
functional/inuse/inuse_003_pos.ksh \
functional/inuse/inuse_004_pos.ksh \
functional/inuse/inuse_005_pos.ksh \
functional/inuse/inuse_006_pos.ksh \
functional/inuse/inuse_007_pos.ksh \
functional/inuse/inuse_008_pos.ksh \
functional/inuse/inuse_009_pos.ksh \
functional/inuse/setup.ksh \
functional/io/cleanup.ksh \
functional/io/io_uring.ksh \
functional/io/libaio.ksh \
functional/io/mmap.ksh \
functional/io/posixaio.ksh \
functional/io/psync.ksh \
functional/io/setup.ksh \
functional/io/sync.ksh \
functional/l2arc/cleanup.ksh \
functional/l2arc/l2arc_arcstats_pos.ksh \
functional/l2arc/l2arc_l2miss_pos.ksh \
functional/l2arc/l2arc_mfuonly_pos.ksh \
functional/l2arc/persist_l2arc_001_pos.ksh \
functional/l2arc/persist_l2arc_002_pos.ksh \
functional/l2arc/persist_l2arc_003_neg.ksh \
functional/l2arc/persist_l2arc_004_pos.ksh \
functional/l2arc/persist_l2arc_005_pos.ksh \
functional/l2arc/setup.ksh \
functional/large_files/cleanup.ksh \
functional/large_files/large_files_001_pos.ksh \
functional/large_files/large_files_002_pos.ksh \
functional/large_files/setup.ksh \
functional/largest_pool/largest_pool_001_pos.ksh \
functional/libzfs/cleanup.ksh \
functional/libzfs/libzfs_input.ksh \
functional/libzfs/setup.ksh \
functional/limits/cleanup.ksh \
functional/limits/filesystem_count.ksh \
functional/limits/filesystem_limit.ksh \
functional/limits/setup.ksh \
functional/limits/snapshot_count.ksh \
functional/limits/snapshot_limit.ksh \
functional/link_count/cleanup.ksh \
functional/link_count/link_count_001.ksh \
functional/link_count/link_count_root_inode.ksh \
functional/link_count/setup.ksh \
functional/log_spacemap/log_spacemap_import_logs.ksh \
functional/migration/cleanup.ksh \
functional/migration/migration_001_pos.ksh \
functional/migration/migration_002_pos.ksh \
functional/migration/migration_003_pos.ksh \
functional/migration/migration_004_pos.ksh \
functional/migration/migration_005_pos.ksh \
functional/migration/migration_006_pos.ksh \
functional/migration/migration_007_pos.ksh \
functional/migration/migration_008_pos.ksh \
functional/migration/migration_009_pos.ksh \
functional/migration/migration_010_pos.ksh \
functional/migration/migration_011_pos.ksh \
functional/migration/migration_012_pos.ksh \
functional/migration/setup.ksh \
functional/mmap/cleanup.ksh \
functional/mmap/mmap_libaio_001_pos.ksh \
functional/mmap/mmap_mixed.ksh \
functional/mmap/mmap_read_001_pos.ksh \
functional/mmap/mmap_seek_001_pos.ksh \
functional/mmap/mmap_sync_001_pos.ksh \
functional/mmap/mmap_write_001_pos.ksh \
functional/mmap/setup.ksh \
functional/mmp/cleanup.ksh \
functional/mmp/mmp_active_import.ksh \
functional/mmp/mmp_exported_import.ksh \
functional/mmp/mmp_hostid.ksh \
functional/mmp/mmp_inactive_import.ksh \
functional/mmp/mmp_interval.ksh \
functional/mmp/mmp_on_off.ksh \
functional/mmp/mmp_on_thread.ksh \
functional/mmp/mmp_on_uberblocks.ksh \
functional/mmp/mmp_on_zdb.ksh \
functional/mmp/mmp_reset_interval.ksh \
functional/mmp/mmp_write_distribution.ksh \
functional/mmp/mmp_write_uberblocks.ksh \
functional/mmp/multihost_history.ksh \
functional/mmp/setup.ksh \
functional/mount/cleanup.ksh \
functional/mount/setup.ksh \
functional/mount/umount_001.ksh \
functional/mount/umountall_001.ksh \
functional/mount/umount_unlinked_drain.ksh \
functional/mv_files/cleanup.ksh \
functional/mv_files/mv_files_001_pos.ksh \
functional/mv_files/mv_files_002_pos.ksh \
functional/mv_files/random_creation.ksh \
functional/mv_files/setup.ksh \
functional/nestedfs/cleanup.ksh \
functional/nestedfs/nestedfs_001_pos.ksh \
functional/nestedfs/setup.ksh \
functional/nopwrite/cleanup.ksh \
functional/nopwrite/nopwrite_copies.ksh \
functional/nopwrite/nopwrite_mtime.ksh \
functional/nopwrite/nopwrite_negative.ksh \
functional/nopwrite/nopwrite_promoted_clone.ksh \
functional/nopwrite/nopwrite_recsize.ksh \
functional/nopwrite/nopwrite_sync.ksh \
functional/nopwrite/nopwrite_varying_compression.ksh \
functional/nopwrite/nopwrite_volume.ksh \
functional/nopwrite/setup.ksh \
functional/no_space/cleanup.ksh \
functional/no_space/enospc_001_pos.ksh \
functional/no_space/enospc_002_pos.ksh \
functional/no_space/enospc_003_pos.ksh \
functional/no_space/enospc_df.ksh \
functional/no_space/enospc_ganging.ksh \
functional/no_space/enospc_rm.ksh \
functional/no_space/setup.ksh \
functional/online_offline/cleanup.ksh \
functional/online_offline/online_offline_001_pos.ksh \
functional/online_offline/online_offline_002_neg.ksh \
functional/online_offline/online_offline_003_neg.ksh \
functional/online_offline/setup.ksh \
functional/pam/cleanup.ksh \
functional/pam/pam_basic.ksh \
functional/pam/pam_nounmount.ksh \
functional/pam/pam_short_password.ksh \
functional/pam/setup.ksh \
functional/pool_checkpoint/checkpoint_after_rewind.ksh \
functional/pool_checkpoint/checkpoint_big_rewind.ksh \
functional/pool_checkpoint/checkpoint_capacity.ksh \
functional/pool_checkpoint/checkpoint_conf_change.ksh \
functional/pool_checkpoint/checkpoint_discard_busy.ksh \
functional/pool_checkpoint/checkpoint_discard.ksh \
functional/pool_checkpoint/checkpoint_discard_many.ksh \
functional/pool_checkpoint/checkpoint_indirect.ksh \
functional/pool_checkpoint/checkpoint_invalid.ksh \
functional/pool_checkpoint/checkpoint_lun_expsz.ksh \
functional/pool_checkpoint/checkpoint_open.ksh \
functional/pool_checkpoint/checkpoint_removal.ksh \
functional/pool_checkpoint/checkpoint_rewind.ksh \
functional/pool_checkpoint/checkpoint_ro_rewind.ksh \
functional/pool_checkpoint/checkpoint_sm_scale.ksh \
functional/pool_checkpoint/checkpoint_twice.ksh \
functional/pool_checkpoint/checkpoint_vdev_add.ksh \
functional/pool_checkpoint/checkpoint_zdb.ksh \
functional/pool_checkpoint/checkpoint_zhack_feat.ksh \
functional/pool_checkpoint/cleanup.ksh \
functional/pool_checkpoint/setup.ksh \
functional/pool_names/pool_names_001_pos.ksh \
functional/pool_names/pool_names_002_neg.ksh \
functional/poolversion/cleanup.ksh \
functional/poolversion/poolversion_001_pos.ksh \
functional/poolversion/poolversion_002_pos.ksh \
functional/poolversion/setup.ksh \
functional/privilege/cleanup.ksh \
functional/privilege/privilege_001_pos.ksh \
functional/privilege/privilege_002_pos.ksh \
functional/privilege/setup.ksh \
functional/procfs/cleanup.ksh \
functional/procfs/pool_state.ksh \
functional/procfs/procfs_list_basic.ksh \
functional/procfs/procfs_list_concurrent_readers.ksh \
functional/procfs/procfs_list_stale_read.ksh \
functional/procfs/setup.ksh \
functional/projectquota/cleanup.ksh \
functional/projectquota/projectid_001_pos.ksh \
functional/projectquota/projectid_002_pos.ksh \
functional/projectquota/projectid_003_pos.ksh \
functional/projectquota/projectquota_001_pos.ksh \
functional/projectquota/projectquota_002_pos.ksh \
functional/projectquota/projectquota_003_pos.ksh \
functional/projectquota/projectquota_004_neg.ksh \
functional/projectquota/projectquota_005_pos.ksh \
functional/projectquota/projectquota_006_pos.ksh \
functional/projectquota/projectquota_007_pos.ksh \
functional/projectquota/projectquota_008_pos.ksh \
functional/projectquota/projectquota_009_pos.ksh \
functional/projectquota/projectspace_001_pos.ksh \
functional/projectquota/projectspace_002_pos.ksh \
functional/projectquota/projectspace_003_pos.ksh \
functional/projectquota/projectspace_004_pos.ksh \
functional/projectquota/projecttree_001_pos.ksh \
functional/projectquota/projecttree_002_pos.ksh \
functional/projectquota/projecttree_003_neg.ksh \
functional/projectquota/setup.ksh \
functional/quota/cleanup.ksh \
functional/quota/quota_001_pos.ksh \
functional/quota/quota_002_pos.ksh \
functional/quota/quota_003_pos.ksh \
functional/quota/quota_004_pos.ksh \
functional/quota/quota_005_pos.ksh \
functional/quota/quota_006_neg.ksh \
functional/quota/setup.ksh \
functional/raidz/cleanup.ksh \
functional/raidz/raidz_001_neg.ksh \
functional/raidz/raidz_002_pos.ksh \
functional/raidz/raidz_003_pos.ksh \
functional/raidz/raidz_004_pos.ksh \
functional/raidz/setup.ksh \
functional/redacted_send/cleanup.ksh \
functional/redacted_send/redacted_compressed.ksh \
functional/redacted_send/redacted_contents.ksh \
functional/redacted_send/redacted_deleted.ksh \
functional/redacted_send/redacted_disabled_feature.ksh \
functional/redacted_send/redacted_embedded.ksh \
functional/redacted_send/redacted_holes.ksh \
functional/redacted_send/redacted_incrementals.ksh \
functional/redacted_send/redacted_largeblocks.ksh \
functional/redacted_send/redacted_many_clones.ksh \
functional/redacted_send/redacted_mixed_recsize.ksh \
functional/redacted_send/redacted_mounts.ksh \
functional/redacted_send/redacted_negative.ksh \
functional/redacted_send/redacted_origin.ksh \
functional/redacted_send/redacted_panic.ksh \
functional/redacted_send/redacted_props.ksh \
functional/redacted_send/redacted_resume.ksh \
functional/redacted_send/redacted_size.ksh \
functional/redacted_send/redacted_volume.ksh \
functional/redacted_send/setup.ksh \
functional/redundancy/cleanup.ksh \
functional/redundancy/redundancy_draid1.ksh \
functional/redundancy/redundancy_draid2.ksh \
functional/redundancy/redundancy_draid3.ksh \
functional/redundancy/redundancy_draid_damaged1.ksh \
functional/redundancy/redundancy_draid_damaged2.ksh \
functional/redundancy/redundancy_draid.ksh \
functional/redundancy/redundancy_draid_spare1.ksh \
functional/redundancy/redundancy_draid_spare2.ksh \
functional/redundancy/redundancy_draid_spare3.ksh \
functional/redundancy/redundancy_mirror.ksh \
functional/redundancy/redundancy_raidz1.ksh \
functional/redundancy/redundancy_raidz2.ksh \
functional/redundancy/redundancy_raidz3.ksh \
functional/redundancy/redundancy_raidz.ksh \
functional/redundancy/redundancy_stripe.ksh \
functional/redundancy/setup.ksh \
functional/refquota/cleanup.ksh \
functional/refquota/refquota_001_pos.ksh \
functional/refquota/refquota_002_pos.ksh \
functional/refquota/refquota_003_pos.ksh \
functional/refquota/refquota_004_pos.ksh \
functional/refquota/refquota_005_pos.ksh \
functional/refquota/refquota_006_neg.ksh \
functional/refquota/refquota_007_neg.ksh \
functional/refquota/refquota_008_neg.ksh \
functional/refquota/setup.ksh \
functional/refreserv/cleanup.ksh \
functional/refreserv/refreserv_001_pos.ksh \
functional/refreserv/refreserv_002_pos.ksh \
functional/refreserv/refreserv_003_pos.ksh \
functional/refreserv/refreserv_004_pos.ksh \
functional/refreserv/refreserv_005_pos.ksh \
functional/refreserv/refreserv_multi_raidz.ksh \
functional/refreserv/refreserv_raidz.ksh \
functional/refreserv/setup.ksh \
functional/removal/cleanup.ksh \
functional/removal/removal_all_vdev.ksh \
functional/removal/removal_cancel.ksh \
functional/removal/removal_check_space.ksh \
functional/removal/removal_condense_export.ksh \
functional/removal/removal_multiple_indirection.ksh \
functional/removal/removal_nopwrite.ksh \
functional/removal/removal_remap_deadlists.ksh \
functional/removal/removal_reservation.ksh \
functional/removal/removal_resume_export.ksh \
functional/removal/removal_sanity.ksh \
functional/removal/removal_with_add.ksh \
functional/removal/removal_with_create_fs.ksh \
functional/removal/removal_with_dedup.ksh \
functional/removal/removal_with_errors.ksh \
functional/removal/removal_with_export.ksh \
functional/removal/removal_with_faulted.ksh \
functional/removal/removal_with_ganging.ksh \
functional/removal/removal_with_indirect.ksh \
functional/removal/removal_with_remove.ksh \
functional/removal/removal_with_scrub.ksh \
functional/removal/removal_with_send.ksh \
functional/removal/removal_with_send_recv.ksh \
functional/removal/removal_with_snapshot.ksh \
functional/removal/removal_with_write.ksh \
functional/removal/removal_with_zdb.ksh \
functional/removal/remove_attach_mirror.ksh \
functional/removal/remove_expanded.ksh \
functional/removal/remove_indirect.ksh \
functional/removal/remove_mirror.ksh \
functional/removal/remove_mirror_sanity.ksh \
functional/removal/remove_raidz.ksh \
functional/rename_dirs/cleanup.ksh \
functional/rename_dirs/rename_dirs_001_pos.ksh \
functional/rename_dirs/setup.ksh \
functional/renameat2/cleanup.ksh \
functional/renameat2/setup.ksh \
functional/renameat2/renameat2_exchange.ksh \
functional/renameat2/renameat2_noreplace.ksh \
functional/renameat2/renameat2_whiteout.ksh \
functional/replacement/attach_import.ksh \
functional/replacement/attach_multiple.ksh \
functional/replacement/attach_rebuild.ksh \
functional/replacement/attach_resilver.ksh \
functional/replacement/cleanup.ksh \
functional/replacement/detach.ksh \
functional/replacement/rebuild_disabled_feature.ksh \
functional/replacement/rebuild_multiple.ksh \
functional/replacement/rebuild_raidz.ksh \
functional/replacement/replace_import.ksh \
functional/replacement/replace_rebuild.ksh \
functional/replacement/replace_resilver.ksh \
functional/replacement/resilver_restart_001.ksh \
functional/replacement/resilver_restart_002.ksh \
functional/replacement/scrub_cancel.ksh \
functional/replacement/setup.ksh \
functional/reservation/cleanup.ksh \
functional/reservation/reservation_001_pos.ksh \
functional/reservation/reservation_002_pos.ksh \
functional/reservation/reservation_003_pos.ksh \
functional/reservation/reservation_004_pos.ksh \
functional/reservation/reservation_005_pos.ksh \
functional/reservation/reservation_006_pos.ksh \
functional/reservation/reservation_007_pos.ksh \
functional/reservation/reservation_008_pos.ksh \
functional/reservation/reservation_009_pos.ksh \
functional/reservation/reservation_010_pos.ksh \
functional/reservation/reservation_011_pos.ksh \
functional/reservation/reservation_012_pos.ksh \
functional/reservation/reservation_013_pos.ksh \
functional/reservation/reservation_014_pos.ksh \
functional/reservation/reservation_015_pos.ksh \
functional/reservation/reservation_016_pos.ksh \
functional/reservation/reservation_017_pos.ksh \
functional/reservation/reservation_018_pos.ksh \
functional/reservation/reservation_019_pos.ksh \
functional/reservation/reservation_020_pos.ksh \
functional/reservation/reservation_021_neg.ksh \
functional/reservation/reservation_022_pos.ksh \
functional/reservation/setup.ksh \
functional/rootpool/cleanup.ksh \
functional/rootpool/rootpool_002_neg.ksh \
functional/rootpool/rootpool_003_neg.ksh \
functional/rootpool/rootpool_007_pos.ksh \
functional/rootpool/setup.ksh \
functional/rsend/cleanup.ksh \
functional/rsend/recv_dedup_encrypted_zvol.ksh \
functional/rsend/recv_dedup.ksh \
functional/rsend/rsend_001_pos.ksh \
functional/rsend/rsend_002_pos.ksh \
functional/rsend/rsend_003_pos.ksh \
functional/rsend/rsend_004_pos.ksh \
functional/rsend/rsend_005_pos.ksh \
functional/rsend/rsend_006_pos.ksh \
functional/rsend/rsend_007_pos.ksh \
functional/rsend/rsend_008_pos.ksh \
functional/rsend/rsend_009_pos.ksh \
functional/rsend/rsend_010_pos.ksh \
functional/rsend/rsend_011_pos.ksh \
functional/rsend/rsend_012_pos.ksh \
functional/rsend/rsend_013_pos.ksh \
functional/rsend/rsend_014_pos.ksh \
functional/rsend/rsend_016_neg.ksh \
functional/rsend/rsend_019_pos.ksh \
functional/rsend/rsend_020_pos.ksh \
functional/rsend/rsend_021_pos.ksh \
functional/rsend/rsend_022_pos.ksh \
functional/rsend/rsend_024_pos.ksh \
functional/rsend/rsend_025_pos.ksh \
functional/rsend/rsend_026_neg.ksh \
functional/rsend/rsend_027_pos.ksh \
functional/rsend/rsend_028_neg.ksh \
functional/rsend/rsend_029_neg.ksh \
functional/rsend/rsend_030_pos.ksh \
functional/rsend/rsend_031_pos.ksh \
functional/rsend/send-c_embedded_blocks.ksh \
functional/rsend/send-c_incremental.ksh \
functional/rsend/send-c_lz4_disabled.ksh \
functional/rsend/send-c_mixed_compression.ksh \
functional/rsend/send-c_props.ksh \
functional/rsend/send-c_recv_dedup.ksh \
functional/rsend/send-c_recv_lz4_disabled.ksh \
functional/rsend/send-c_resume.ksh \
functional/rsend/send-c_stream_size_estimate.ksh \
functional/rsend/send-c_verify_contents.ksh \
functional/rsend/send-c_verify_ratio.ksh \
functional/rsend/send-c_volume.ksh \
functional/rsend/send-c_zstream_recompress.ksh \
functional/rsend/send-c_zstreamdump.ksh \
functional/rsend/send-cpL_varied_recsize.ksh \
functional/rsend/send_doall.ksh \
functional/rsend/send_encrypted_incremental.ksh \
functional/rsend/send_encrypted_files.ksh \
functional/rsend/send_encrypted_freeobjects.ksh \
functional/rsend/send_encrypted_hierarchy.ksh \
functional/rsend/send_encrypted_props.ksh \
functional/rsend/send_encrypted_truncated_files.ksh \
functional/rsend/send_freeobjects.ksh \
functional/rsend/send_holds.ksh \
functional/rsend/send_hole_birth.ksh \
functional/rsend/send_invalid.ksh \
functional/rsend/send-L_toggle.ksh \
functional/rsend/send_mixed_raw.ksh \
functional/rsend/send_partial_dataset.ksh \
functional/rsend/send_raw_ashift.ksh \
functional/rsend/send_raw_spill_block.ksh \
functional/rsend/send_raw_large_blocks.ksh \
functional/rsend/send_realloc_dnode_size.ksh \
functional/rsend/send_realloc_encrypted_files.ksh \
functional/rsend/send_realloc_files.ksh \
functional/rsend/send_spill_block.ksh \
functional/rsend/send-wR_encrypted_zvol.ksh \
functional/rsend/setup.ksh \
functional/scrub_mirror/cleanup.ksh \
functional/scrub_mirror/scrub_mirror_001_pos.ksh \
functional/scrub_mirror/scrub_mirror_002_pos.ksh \
functional/scrub_mirror/scrub_mirror_003_pos.ksh \
functional/scrub_mirror/scrub_mirror_004_pos.ksh \
functional/scrub_mirror/setup.ksh \
functional/slog/cleanup.ksh \
functional/slog/setup.ksh \
functional/slog/slog_001_pos.ksh \
functional/slog/slog_002_pos.ksh \
functional/slog/slog_003_pos.ksh \
functional/slog/slog_004_pos.ksh \
functional/slog/slog_005_pos.ksh \
functional/slog/slog_006_pos.ksh \
functional/slog/slog_007_pos.ksh \
functional/slog/slog_008_neg.ksh \
functional/slog/slog_009_neg.ksh \
functional/slog/slog_010_neg.ksh \
functional/slog/slog_011_neg.ksh \
functional/slog/slog_012_neg.ksh \
functional/slog/slog_013_pos.ksh \
functional/slog/slog_014_pos.ksh \
functional/slog/slog_015_neg.ksh \
functional/slog/slog_016_pos.ksh \
functional/slog/slog_replay_fs_001.ksh \
functional/slog/slog_replay_fs_002.ksh \
functional/slog/slog_replay_volume.ksh \
functional/snapshot/cleanup.ksh \
functional/snapshot/clone_001_pos.ksh \
functional/snapshot/rollback_001_pos.ksh \
functional/snapshot/rollback_002_pos.ksh \
functional/snapshot/rollback_003_pos.ksh \
functional/snapshot/setup.ksh \
functional/snapshot/snapshot_001_pos.ksh \
functional/snapshot/snapshot_002_pos.ksh \
functional/snapshot/snapshot_003_pos.ksh \
functional/snapshot/snapshot_004_pos.ksh \
functional/snapshot/snapshot_005_pos.ksh \
functional/snapshot/snapshot_006_pos.ksh \
functional/snapshot/snapshot_007_pos.ksh \
functional/snapshot/snapshot_008_pos.ksh \
functional/snapshot/snapshot_009_pos.ksh \
functional/snapshot/snapshot_010_pos.ksh \
functional/snapshot/snapshot_011_pos.ksh \
functional/snapshot/snapshot_012_pos.ksh \
functional/snapshot/snapshot_013_pos.ksh \
functional/snapshot/snapshot_014_pos.ksh \
functional/snapshot/snapshot_015_pos.ksh \
functional/snapshot/snapshot_016_pos.ksh \
functional/snapshot/snapshot_017_pos.ksh \
functional/snapshot/snapshot_018_pos.ksh \
functional/snapused/cleanup.ksh \
functional/snapused/setup.ksh \
functional/snapused/snapused_001_pos.ksh \
functional/snapused/snapused_002_pos.ksh \
functional/snapused/snapused_003_pos.ksh \
functional/snapused/snapused_004_pos.ksh \
functional/snapused/snapused_005_pos.ksh \
functional/sparse/cleanup.ksh \
functional/sparse/setup.ksh \
functional/sparse/sparse_001_pos.ksh \
functional/stat/cleanup.ksh \
functional/stat/setup.ksh \
functional/stat/stat_001_pos.ksh \
functional/suid/cleanup.ksh \
functional/suid/setup.ksh \
functional/suid/suid_write_to_none.ksh \
functional/suid/suid_write_to_sgid.ksh \
functional/suid/suid_write_to_suid.ksh \
functional/suid/suid_write_to_suid_sgid.ksh \
functional/suid/suid_write_zil_replay.ksh \
functional/trim/autotrim_config.ksh \
functional/trim/autotrim_integrity.ksh \
functional/trim/autotrim_trim_integrity.ksh \
functional/trim/cleanup.ksh \
functional/trim/setup.ksh \
functional/trim/trim_config.ksh \
functional/trim/trim_integrity.ksh \
functional/trim/trim_l2arc.ksh \
functional/truncate/cleanup.ksh \
functional/truncate/setup.ksh \
functional/truncate/truncate_001_pos.ksh \
functional/truncate/truncate_002_pos.ksh \
functional/truncate/truncate_timestamps.ksh \
functional/upgrade/cleanup.ksh \
functional/upgrade/setup.ksh \
functional/upgrade/upgrade_projectquota_001_pos.ksh \
functional/upgrade/upgrade_readonly_pool.ksh \
functional/upgrade/upgrade_userobj_001_pos.ksh \
functional/user_namespace/cleanup.ksh \
functional/user_namespace/setup.ksh \
functional/user_namespace/user_namespace_001.ksh \
functional/user_namespace/user_namespace_002.ksh \
functional/user_namespace/user_namespace_003.ksh \
functional/user_namespace/user_namespace_004.ksh \
functional/userquota/cleanup.ksh \
functional/userquota/groupspace_001_pos.ksh \
functional/userquota/groupspace_002_pos.ksh \
functional/userquota/groupspace_003_pos.ksh \
functional/userquota/setup.ksh \
functional/userquota/userquota_001_pos.ksh \
functional/userquota/userquota_002_pos.ksh \
functional/userquota/userquota_003_pos.ksh \
functional/userquota/userquota_004_pos.ksh \
functional/userquota/userquota_005_neg.ksh \
functional/userquota/userquota_006_pos.ksh \
functional/userquota/userquota_007_pos.ksh \
functional/userquota/userquota_008_pos.ksh \
functional/userquota/userquota_009_pos.ksh \
functional/userquota/userquota_010_pos.ksh \
functional/userquota/userquota_011_pos.ksh \
functional/userquota/userquota_012_neg.ksh \
functional/userquota/userquota_013_pos.ksh \
functional/userquota/userspace_001_pos.ksh \
functional/userquota/userspace_002_pos.ksh \
functional/userquota/userspace_003_pos.ksh \
functional/userquota/userspace_encrypted.ksh \
functional/userquota/userspace_send_encrypted.ksh \
functional/userquota/userspace_encrypted_13709.ksh \
functional/vdev_zaps/cleanup.ksh \
functional/vdev_zaps/setup.ksh \
functional/vdev_zaps/vdev_zaps_001_pos.ksh \
functional/vdev_zaps/vdev_zaps_002_pos.ksh \
functional/vdev_zaps/vdev_zaps_003_pos.ksh \
functional/vdev_zaps/vdev_zaps_004_pos.ksh \
functional/vdev_zaps/vdev_zaps_005_pos.ksh \
functional/vdev_zaps/vdev_zaps_006_pos.ksh \
functional/vdev_zaps/vdev_zaps_007_pos.ksh \
functional/write_dirs/cleanup.ksh \
functional/write_dirs/setup.ksh \
functional/write_dirs/write_dirs_001_pos.ksh \
functional/write_dirs/write_dirs_002_pos.ksh \
functional/xattr/cleanup.ksh \
functional/xattr/setup.ksh \
functional/xattr/xattr_001_pos.ksh \
functional/xattr/xattr_002_neg.ksh \
functional/xattr/xattr_003_neg.ksh \
functional/xattr/xattr_004_pos.ksh \
functional/xattr/xattr_005_pos.ksh \
functional/xattr/xattr_006_pos.ksh \
functional/xattr/xattr_007_neg.ksh \
functional/xattr/xattr_008_pos.ksh \
functional/xattr/xattr_009_neg.ksh \
functional/xattr/xattr_010_neg.ksh \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zpool_influxdb/cleanup.ksh \
functional/zpool_influxdb/setup.ksh \
functional/zpool_influxdb/zpool_influxdb.ksh \
functional/zvol/zvol_cli/cleanup.ksh \
functional/zvol/zvol_cli/setup.ksh \
functional/zvol/zvol_cli/zvol_cli_001_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_002_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_003_neg.ksh \
functional/zvol/zvol_ENOSPC/cleanup.ksh \
functional/zvol/zvol_ENOSPC/setup.ksh \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh \
functional/zvol/zvol_misc/cleanup.ksh \
functional/zvol/zvol_misc/setup.ksh \
functional/zvol/zvol_misc/zvol_misc_001_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_002_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_003_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_004_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_005_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_006_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_fua.ksh \
functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh \
functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh \
functional/zvol/zvol_misc/zvol_misc_snapdev.ksh \
functional/zvol/zvol_misc/zvol_misc_trim.ksh \
functional/zvol/zvol_misc/zvol_misc_volmode.ksh \
functional/zvol/zvol_misc/zvol_misc_zil.ksh \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_002_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_003_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_004_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_005_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_006_pos.ksh \
functional/idmap_mount/cleanup.ksh \
functional/idmap_mount/setup.ksh \
functional/idmap_mount/idmap_mount_001.ksh \
functional/idmap_mount/idmap_mount_002.ksh \
functional/idmap_mount/idmap_mount_003.ksh \
functional/idmap_mount/idmap_mount_004.ksh \
functional/idmap_mount/idmap_mount_005.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib
new file mode 100644
index 000000000000..8e16366b4cd6
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib
@@ -0,0 +1,54 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+function have_same_content
+{
+ typeset hash1=$(cat $1 | md5sum)
+ typeset hash2=$(cat $2 | md5sum)
+
+ log_must [ "$hash1" = "$hash2" ]
+}
+
+#
+# get_same_blocks dataset1 path/to/file1 dataset2 path/to/file2
+#
+# Returns a space-separated list of the indexes (starting at 0) of the L0
+# blocks that are shared between both files (by first DVA and checksum).
+# Assumes that the two files have the same content; use have_same_content to
+# confirm that.
+#
+function get_same_blocks
+{
+ typeset zdbout=${TMPDIR:-$TEST_BASE_DIR}/zdbout.$$
+ zdb -vvvvv $1 -O $2 | \
+ awk '/ L0 / { print l++ " " $3 " " $7 }' > $zdbout.a
+ zdb -vvvvv $3 -O $4 | \
+ awk '/ L0 / { print l++ " " $3 " " $7 }' > $zdbout.b
+ echo $(sort $zdbout.a $zdbout.b | uniq -d | cut -f1 -d' ')
+}
+
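Note on the helpers above: get_same_blocks pairs each L0 block's zero-based index with the fields the header comment identifies as the first DVA and the checksum (awk fields $3 and $7 of zdb's block listing), writes one such list per file, and `sort | uniq -d | cut -f1 -d' '` keeps only the indexes whose DVA and checksum match in both files, i.e. blocks that physically share storage. A minimal usage sketch, assuming a pool named "tank" with two files already written (names are illustrative, not suite fixtures):

    . $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib

    # Hypothetical pool/file names, not part of the test suite
    have_same_content /tank/file1 /tank/file2
    typeset shared=$(get_same_blocks tank file1 tank file2)
    echo "shared L0 indexes: $shared"   # e.g. "0 1 2 3" after a full clone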
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
new file mode 100755
index 000000000000..43ea47b0ef19
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
@@ -0,0 +1,60 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+claim="The copy_file_range syscall can clone whole files."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+log_pass $claim
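The length 524288 is not arbitrary: the file is written as four 128 KiB records (4 x 131072 = 524288 bytes), so cloning the full length should leave all four L0 blocks shared, which is exactly what the "0 1 2 3" assertion checks. The arithmetic, sketched:

    typeset -i recsize=131072          # 128 KiB, matching the dd block size above
    typeset -i count=4
    echo $((recsize * count))          # 524288, the length passed to clonefile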
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh
new file mode 100755
index 000000000000..74e6b04903a3
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh
@@ -0,0 +1,65 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "5.3") ]]; then
+ log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3"
+fi
+
+claim="The copy_file_range syscall can clone across datasets."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must zfs create $TESTPOOL/$TESTFS1
+log_must zfs create $TESTPOOL/$TESTFS2
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must \
+ clonefile -f /$TESTPOOL/$TESTFS1/file1 /$TESTPOOL/$TESTFS2/file2 0 0 524288
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/$TESTFS1/file1 /$TESTPOOL/$TESTFS2/file2
+
+typeset blocks=$(get_same_blocks \
+ $TESTPOOL/$TESTFS1 file1 $TESTPOOL/$TESTFS2 file2)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh
new file mode 100755
index 000000000000..9a96eacd60af
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh
@@ -0,0 +1,86 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+claim="copy_file_range will fall back to copy when cloning not possible."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+
+log_note "Copying entire file with copy_file_range"
+
+log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 0 0 524288
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
+
+typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+
+log_note "Copying within a block with copy_file_range"
+
+log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 32768 32768 65536
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
+
+typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
+log_must [ "$blocks" = "1 2 3" ]
+
+
+log_note "Copying across a block with copy_file_range"
+
+log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 327680 327680 131072
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
+
+typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
+log_must [ "$blocks" = "1" ]
+
+log_pass $claim
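The expected index lists follow from 128 KiB block boundaries: the in-block copy (offset 32768, length 65536) lands entirely inside block 0 (bytes 0-131071), so only block 0 is rewritten and "1 2 3" stay shared, while the cross-block copy (offset 327680, length 131072) spans block 2 (bytes 262144-393215) and block 3 (bytes 393216-524287), leaving only "1" shared. The range-to-block arithmetic, sketched:

    # Which 128 KiB blocks does byte range [off, off+len) touch?
    typeset -i bs=131072 off=327680 len=131072
    echo "first block: $((off / bs))"               # 2
    echo "last block:  $(((off + len - 1) / bs))"   # 3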
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh
new file mode 100755
index 000000000000..a10545bc0769
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh
@@ -0,0 +1,66 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+claim="copy_file_range will fall back to copy when cloning on same txg"
+
+log_assert $claim
+
+typeset timeout=$(get_tunable TXG_TIMEOUT)
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ set_tunable64 TXG_TIMEOUT $timeout
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must set_tunable64 TXG_TIMEOUT 5000
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=128K count=4
+log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 0 0 524288
+
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
+
+typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
+log_must [ "$blocks" = "" ]
+
+log_pass $claim
+
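Here the empty shared-block list is the point: with TXG_TIMEOUT stretched to 5000 and no sync between the write and the clone, both land in the same transaction group, where the source blocks are not yet on disk and cannot be referenced, so copy_file_range must fall back to an ordinary copy. The tunable save/restore pattern the test relies on, sketched:

    typeset saved=$(get_tunable TXG_TIMEOUT)
    log_must set_tunable64 TXG_TIMEOUT 5000    # hold write + clone in one txg
    # ... test body runs within the stretched txg ...
    log_must set_tunable64 TXG_TIMEOUT $saved  # cleanup restores this on exit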
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh
new file mode 100755
index 000000000000..a5da0a0bd359
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh
@@ -0,0 +1,68 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+claim="The copy_file_range syscall can clone parts of a file."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must dd if=/$TESTPOOL/file1 of=/$TESTPOOL/file2 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "" ]
+
+log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 131072 131072 262144
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "1 2" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh
new file mode 100755
index 000000000000..d21b6251134e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh
@@ -0,0 +1,60 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+claim="The copy_file_range syscall copies files when block cloning is disabled."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=disabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_ficlone.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_ficlone.ksh
new file mode 100755
index 000000000000..10a2715ea253
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_ficlone.ksh
@@ -0,0 +1,50 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+claim="The FICLONE ioctl fails when block cloning is disabled."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=disabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_mustnot clonefile -c /$TESTPOOL/file1 /$TESTPOOL/file2
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_ficlonerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_ficlonerange.ksh
new file mode 100755
index 000000000000..e8461e6d3c38
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_ficlonerange.ksh
@@ -0,0 +1,50 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+claim="The FICLONERANGE ioctl fails when block cloning is disabled."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=disabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_mustnot clonefile -r /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
+
+log_pass $claim
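Taken together, the claims in these tests imply the clonefile flag mapping below (inferred from the assertions, not from clonefile documentation):

    clonefile -c src dst                  # FICLONE: whole-file clone; fails when cloning is disabled
    clonefile -r src dst off off len      # FICLONERANGE: range clone; fails when cloning is disabled
    clonefile -f src dst off off len      # copy_file_range: clones when possible, else copies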
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone.ksh
new file mode 100755
index 000000000000..3f227fb68ee3
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone.ksh
@@ -0,0 +1,56 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+claim="The FICLONE ioctl can clone files."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must clonefile -c /$TESTPOOL/file1 /$TESTPOOL/file2
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlonerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlonerange.ksh
new file mode 100755
index 000000000000..cefc4336aefd
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlonerange.ksh
@@ -0,0 +1,56 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+claim="The FICLONERANGE ioctl can clone whole files."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must clonefile -r /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlonerange_partial.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlonerange_partial.ksh
new file mode 100755
index 000000000000..067f55aaa65b
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlonerange_partial.ksh
@@ -0,0 +1,64 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+claim="The FICLONERANGE ioctl can clone parts of a file."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must dd if=/$TESTPOOL/file1 of=/$TESTPOOL/file2 bs=128K count=4
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "" ]
+
+log_must clonefile -r /$TESTPOOL/file1 /$TESTPOOL/file2 131072 131072 262144
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+
+typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
+log_must [ "$blocks" = "1 2" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/cleanup.ksh
new file mode 100755
index 000000000000..7ac13adb6325
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/cleanup.ksh
@@ -0,0 +1,34 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+default_cleanup_noexit
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
new file mode 100755
index 000000000000..512f5a0644df
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+if ! command -v clonefile > /dev/null ; then
+ log_unsupported "clonefile program required to test block cloning"
+fi
+
+verify_runnable "global"
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_attach/attach-o_ashift.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_attach/attach-o_ashift.ksh
index 6ccec6abd66f..574cb7654d10 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_attach/attach-o_ashift.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_attach/attach-o_ashift.ksh
@@ -1,101 +1,89 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K. All rights reserved.
# Copyright (c) 2020 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
#
# DESCRIPTION:
# 'zpool attach -o ashift=<n> ...' should work with different ashift
# values.
#
# STRATEGY:
# 1. Create various pools with different ashift values.
-# 2. Verify 'attach -o ashift=<n>' works only with allowed values.
+# 2. Verify 'attach' works.
#
verify_runnable "global"
function cleanup
{
log_must set_tunable32 VDEV_FILE_PHYSICAL_ASHIFT $orig_ashift
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
rm -f $disk1 $disk2
}
log_assert "zpool attach -o ashift=<n>' works with different ashift values"
log_onexit cleanup
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must truncate -s $SIZE $disk1
log_must truncate -s $SIZE $disk2
orig_ashift=$(get_tunable VDEV_FILE_PHYSICAL_ASHIFT)
#
# Set the file vdev's ashift to the max. An ashift
# override via the -o ashift property should still
# be honored.
#
log_must set_tunable32 VDEV_FILE_PHYSICAL_ASHIFT 16
typeset ashifts=("9" "10" "11" "12" "13" "14" "15" "16")
for ashift in ${ashifts[@]}
do
- for cmdval in ${ashifts[@]}
- do
- log_must zpool create -o ashift=$ashift $TESTPOOL1 $disk1
- log_must verify_ashift $disk1 $ashift
-
- # ashift_of(attached_disk) <= ashift_of(existing_vdev)
- if [[ $cmdval -le $ashift ]]
- then
- log_must zpool attach -o ashift=$cmdval $TESTPOOL1 \
- $disk1 $disk2
- log_must verify_ashift $disk2 $ashift
- else
- log_mustnot zpool attach -o ashift=$cmdval $TESTPOOL1 \
- $disk1 $disk2
- fi
- # clean things for the next run
- log_must zpool destroy $TESTPOOL1
- log_must zpool labelclear $disk1
- log_must zpool labelclear $disk2
- done
+ log_must zpool create -o ashift=$ashift $TESTPOOL1 $disk1
+ log_must verify_ashift $disk1 $ashift
+ log_must zpool attach $TESTPOOL1 $disk1 $disk2
+ log_must verify_ashift $disk2 $ashift
+ # clean things for the next run
+ log_must zpool destroy $TESTPOOL1
+ log_must zpool labelclear $disk1
+ log_must zpool labelclear $disk2
done
typeset badvals=("off" "on" "1" "8" "17" "1b" "ff" "-")
for badval in ${badvals[@]}
do
log_must zpool create $TESTPOOL1 $disk1
log_mustnot zpool attach -o ashift=$badval $TESTPOOL1 $disk1 $disk2
log_must zpool destroy $TESTPOOL1
log_must zpool labelclear $disk1
log_mustnot zpool labelclear $disk2
done
log_pass "zpool attach -o ashift=<n>' works with different ashift values"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace-o_ashift.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace-o_ashift.ksh
index 37ed0062e61c..9595e51241b3 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace-o_ashift.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace-o_ashift.ksh
@@ -1,101 +1,91 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K. All rights reserved.
# Copyright (c) 2020 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
#
# DESCRIPTION:
# 'zpool replace -o ashift=<n> ...' should work with different ashift
# values.
#
# STRATEGY:
# 1. Create various pools with different ashift values.
-# 2. Verify 'replace -o ashift=<n>' works only with allowed values.
+# 2. Verify 'replace' works.
#
verify_runnable "global"
function cleanup
{
log_must set_tunable32 VDEV_FILE_PHYSICAL_ASHIFT $orig_ashift
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
rm -f $disk1 $disk2
}
log_assert "zpool replace -o ashift=<n>' works with different ashift values"
log_onexit cleanup
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must truncate -s $SIZE $disk1
log_must truncate -s $SIZE $disk2
orig_ashift=$(get_tunable VDEV_FILE_PHYSICAL_ASHIFT)
#
# Set the file vdev's ashift to the max. An ashift
# override via the -o ashift property should still
# be honored.
#
log_must set_tunable32 VDEV_FILE_PHYSICAL_ASHIFT 16
typeset ashifts=("9" "10" "11" "12" "13" "14" "15" "16")
for ashift in ${ashifts[@]}
do
- for cmdval in ${ashifts[@]}
- do
- log_must zpool create -o ashift=$ashift $TESTPOOL1 $disk1
- log_must verify_ashift $disk1 $ashift
- # ashift_of(replacing_disk) <= ashift_of(existing_vdev)
- if [[ $cmdval -le $ashift ]]
- then
- log_must zpool replace -o ashift=$cmdval $TESTPOOL1 \
- $disk1 $disk2
- log_must verify_ashift $disk2 $ashift
- wait_replacing $TESTPOOL1
- else
- log_mustnot zpool replace -o ashift=$cmdval $TESTPOOL1 \
- $disk1 $disk2
- fi
- # clean things for the next run
- log_must zpool destroy $TESTPOOL1
- log_must zpool labelclear $disk1
- log_must zpool labelclear $disk2
- done
+ log_must zpool create -o ashift=$ashift $TESTPOOL1 $disk1
+ log_must verify_ashift $disk1 $ashift
+ # ashift_of(replacing_disk) <= ashift_of(existing_vdev)
+ log_must zpool replace $TESTPOOL1 $disk1 $disk2
+ log_must verify_ashift $disk2 $ashift
+ wait_replacing $TESTPOOL1
+ # clean things for the next run
+ log_must zpool destroy $TESTPOOL1
+ log_must zpool labelclear $disk1
+ log_must zpool labelclear $disk2
done
typeset badvals=("off" "on" "1" "8" "17" "1b" "ff" "-")
for badval in ${badvals[@]}
do
log_must zpool create $TESTPOOL1 $disk1
log_mustnot zpool replace -o ashift=$badval $TESTPOOL1 $disk1 $disk2
log_must zpool destroy $TESTPOOL1
log_must zpool labelclear $disk1
log_mustnot zpool labelclear $disk2
done
log_pass "zpool replace -o ashift=<n>' works with different ashift values"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace_prop_ashift.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace_prop_ashift.ksh
index ffdaf91a2841..b4ac18e5ea25 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace_prop_ashift.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_replace/replace_prop_ashift.ksh
@@ -1,97 +1,83 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K. All rights reserved.
# Copyright (c) 2020 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
#
# DESCRIPTION:
# 'zpool replace' should use the ashift pool property value as default.
#
# STRATEGY:
# 1. Create a pool with default values.
-# 2. Verify 'zpool replace' uses the ashift pool property value when
-# replacing an existing device.
-# 3. Verify the default ashift value can still be overridden by manually
-# specifying '-o ashift=<n>' from the command line.
+# 2. Override the pool ashift property.
+# 3. Verify 'zpool replace' works.
#
verify_runnable "global"
function cleanup
{
log_must set_tunable32 VDEV_FILE_PHYSICAL_ASHIFT $orig_ashift
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
rm -f $disk1 $disk2
}
log_assert "'zpool replace' uses the ashift pool property value as default."
log_onexit cleanup
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must truncate -s $SIZE $disk1
log_must truncate -s $SIZE $disk2
orig_ashift=$(get_tunable VDEV_FILE_PHYSICAL_ASHIFT)
#
# Set the file vdev's ashift to the max. An ashift
# override via the -o ashift property should still
# be honored.
#
log_must set_tunable32 VDEV_FILE_PHYSICAL_ASHIFT 16
typeset ashifts=("9" "10" "11" "12" "13" "14" "15" "16")
for ashift in ${ashifts[@]}
do
for pprop in ${ashifts[@]}
do
log_must zpool create -o ashift=$ashift $TESTPOOL1 $disk1
log_must zpool set ashift=$pprop $TESTPOOL1
- # ashift_of(replacing_disk) <= ashift_of(existing_vdev)
- if [[ $pprop -le $ashift ]]
- then
- log_must zpool replace $TESTPOOL1 $disk1 $disk2
- wait_replacing $TESTPOOL1
- log_must verify_ashift $disk2 $ashift
- else
- # cannot replace if pool prop ashift > vdev ashift
- log_mustnot zpool replace $TESTPOOL1 $disk1 $disk2
- # verify we can override the pool prop value manually
- log_must zpool replace -o ashift=$ashift $TESTPOOL1 \
- $disk1 $disk2
- wait_replacing $TESTPOOL1
- log_must verify_ashift $disk2 $ashift
- fi
+ log_must zpool replace $TESTPOOL1 $disk1 $disk2
+ wait_replacing $TESTPOOL1
+ log_must verify_ashift $disk2 $ashift
# clean things for the next run
log_must zpool destroy $TESTPOOL1
log_must zpool labelclear $disk1
log_must zpool labelclear $disk2
done
done
log_pass "'zpool replace' uses the ashift pool property value."
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index bc0ec8005351..29425d3368e0 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,1089 +1,1113 @@
/*
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
/* add_disk() returns int */
/* #undef HAVE_ADD_DISK_RET */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
/* Define if you have [rt] */
#define HAVE_AIO_H 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdevname() is available */
/* #undef HAVE_BDEVNAME */
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_63 */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_OLD */
/* bdev_kobj() exists */
/* #undef HAVE_BDEV_KOBJ */
/* bdev_max_discard_sectors() is available */
/* #undef HAVE_BDEV_MAX_DISCARD_SECTORS */
/* bdev_max_secure_erase_sectors() is available */
/* #undef HAVE_BDEV_MAX_SECURE_ERASE_SECTORS */
/* block_device_operations->submit_bio() returns void */
/* #undef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio_alloc() takes 4 arguments */
/* #undef HAVE_BIO_ALLOC_4ARG */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_dev() is a macro */
/* #undef HAVE_BIO_SET_DEV_MACRO */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_get_by_path() handles ERESTARTSYS */
/* #undef HAVE_BLKDEV_GET_ERESTARTSYS */
/* blkdev_issue_discard() is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD */
/* blkdev_issue_secure_erase() is available */
/* #undef HAVE_BLKDEV_ISSUE_SECURE_ERASE */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk_cleanup_disk() exists */
/* #undef HAVE_BLK_CLEANUP_DISK */
/* block multiqueue is available */
/* #undef HAVE_BLK_MQ */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_discard() is available */
/* #undef HAVE_BLK_QUEUE_DISCARD */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_update_readahead() exists */
/* #undef HAVE_BLK_QUEUE_UPDATE_READAHEAD */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
+/* BLK_STS_RESV_CONFLICT is defined */
+/* #undef HAVE_BLK_STS_RESV_CONFLICT */
+
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* cpu_has_feature() is GPL-only */
/* #undef HAVE_CPU_HAS_FEATURE_GPL_ONLY */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* dentry aliases are in d_u member */
/* #undef HAVE_DENTRY_D_U_ALIASES */
/* dequeue_signal() takes 4 arguments */
/* #undef HAVE_DEQUEUE_SIGNAL_4ARG */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
+/* disk_check_media_change() exists */
+/* #undef HAVE_DISK_CHECK_MEDIA_CHANGE */
+
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* disk_update_readahead() exists */
/* #undef HAVE_DISK_UPDATE_READAHEAD */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
/* FALLOC_FL_ZERO_RANGE is defined */
/* #undef HAVE_FALLOC_FL_ZERO_RANGE */
/* fault_in_iov_iter_readable() is available */
/* #undef HAVE_FAULT_IN_IOV_ITER_READABLE */
/* filemap_range_has_page() is available */
/* #undef HAVE_FILEMAP_RANGE_HAS_PAGE */
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
/* fops->fadvise() exists */
/* #undef HAVE_FILE_FADVISE */
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* flush_dcache_page() is GPL-only */
/* #undef HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* Define if compiler supports -Wformat-overflow */
/* #undef HAVE_FORMAT_OVERFLOW */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
/* yes */
/* #undef HAVE_GENERIC_FADVISE */
/* generic_fillattr requires struct mnt_idmap* */
/* #undef HAVE_GENERIC_FILLATTR_IDMAP */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_acl() exists */
/* #undef HAVE_GET_ACL */
/* iops->get_acl() takes rcu */
/* #undef HAVE_GET_ACL_RCU */
/* has iops->get_inode_acl() */
/* #undef HAVE_GET_INODE_ACL */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* iattr->ia_vfsuid and iattr->ia_vfsgid exist */
/* #undef HAVE_IATTR_VFSID */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* iops->getattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_GETATTR */
/* iops->setattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_SETATTR */
/* APIs for idmapped mount are present */
/* #undef HAVE_IDMAP_MNT_API */
/* Define if compiler supports -Wimplicit-fallthrough */
/* #undef HAVE_IMPLICIT_FALLTHROUGH */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_INFINITE_RECURSION */
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes mnt_idmap */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAP */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_USERNS */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_CREATE_IDMAP */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKDIR_IDMAP */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKNOD_IDMAP */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->permission() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_PERMISSION_IDMAP */
/* iops->permission() takes struct user_namespace* */
/* #undef HAVE_IOPS_PERMISSION_USERNS */
/* iops->rename() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_RENAME_IDMAP */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->setattr() exists */
/* #undef HAVE_IOPS_SETATTR */
/* iops->symlink() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_SYMLINK_IDMAP */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter_type() is available */
/* #undef HAVE_IOV_ITER_TYPE */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* kernel has asm/fpu/internal.h */
/* #undef HAVE_KERNEL_FPU_INTERNAL_HEADER */
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_KERNEL_INFINITE_RECURSION */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() takes loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() takes loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* linux/blk-cgroup.h exists */
/* #undef HAVE_LINUX_BLK_CGROUP_HEADER */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* folio_wait_bit() exists */
/* #undef HAVE_PAGEMAP_FOLIO_WAIT_BIT */
/* part_to_dev() exists */
/* #undef HAVE_PART_TO_DEV */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and exists */
/* #undef HAVE_QAT */
/* struct reclaim_state has reclaimed */
/* #undef HAVE_RECLAIM_STATE_RECLAIMED */
/* register_shrinker is vararg */
/* #undef HAVE_REGISTER_SHRINKER_VARARG */
/* iops->rename2() exists */
/* #undef HAVE_RENAME2 */
/* struct inode_operations_wrapper takes .rename2() */
/* #undef HAVE_RENAME2_OPERATIONS_WRAPPER */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() accepts mnt_idmap */
/* #undef HAVE_SETATTR_PREPARE_IDMAP */
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args, arg1 is struct mnt_idmap * */
/* #undef HAVE_SET_ACL_IDMAP_DENTRY */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* iops->set_acl() takes 4 args, arg2 is struct dentry * */
/* #undef HAVE_SET_ACL_USERNS_DENTRY_ARG2 */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* cs->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
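/*
 * Hedged sketch, not part of the generated header: toolchain feature
 * macros like the ones above are typically consumed to select a SIMD
 * code path at compile time. The function names below are hypothetical
 * placeholders, not symbols from this tree; the block is guarded out
 * in the same way as the fetch(3) section further down.
 */
#if 0 /* illustrative only, never compiled */
#if defined(HAVE_SSE2)
#define CHECKSUM_IMPL checksum_sse2_impl /* hypothetical SSE2 variant */
#else
#define CHECKSUM_IMPL checksum_scalar_impl /* hypothetical portable fallback */
#endif
#endif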
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* standalone <linux/stdarg.h> exists */
/* #undef HAVE_STANDALONE_LINUX_STDARG */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* struct kobj_type has default_groups */
/* #undef HAVE_SYSFS_DEFAULT_GROUPS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() uses old dentry signature */
/* #undef HAVE_TMPFILE_DENTRY */
/* i_op->tmpfile() has mnt_idmap */
/* #undef HAVE_TMPFILE_IDMAP */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->setattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_SETATTR */
/* user_namespace->ns.inum exists */
/* #undef HAVE_USER_NS_COMMON_INUM */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
+/* fops->clone_file_range() is available */
+/* #undef HAVE_VFS_CLONE_FILE_RANGE */
+
+/* fops->copy_file_range() is available */
+/* #undef HAVE_VFS_COPY_FILE_RANGE */
+
+/* fops->dedupe_file_range() is available */
+/* #undef HAVE_VFS_DEDUPE_FILE_RANGE */
+
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* filemap_dirty_folio exists */
/* #undef HAVE_VFS_FILEMAP_DIRTY_FOLIO */
+/* file_operations_extend takes .copy_file_range() and .clone_file_range() */
+/* #undef HAVE_VFS_FILE_OPERATIONS_EXTEND */
+
+/* generic_copy_file_range() is available */
+/* #undef HAVE_VFS_GENERIC_COPY_FILE_RANGE */
+
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* address_space_operations->readpages exists */
/* #undef HAVE_VFS_READPAGES */
/* read_folio exists */
/* #undef HAVE_VFS_READ_FOLIO */
+/* fops->remap_file_range() is available */
+/* #undef HAVE_VFS_REMAP_FILE_RANGE */
+
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* int (*writepage_t)() takes struct folio* */
/* #undef HAVE_WRITEPAGE_T_FOLIO */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
/* xattr_handler->get() wants dentry and inode and flags */
/* #undef HAVE_XATTR_GET_DENTRY_INODE_FLAGS */
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes mnt_idmap */
/* #undef HAVE_XATTR_SET_IDMAP */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define if host toolchain supports XSAVE */
#define HAVE_XSAVE 1
/* Define if host toolchain supports XSAVEOPT */
#define HAVE_XSAVEOPT 1
/* Define if host toolchain supports XSAVES */
#define HAVE_XSAVES 1
/* ZERO_PAGE() is GPL-only */
/* #undef HAVE_ZERO_PAGE_GPL_ONLY */
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
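/*
 * Hedged sketch, assumption only: with LIBFETCH_DYNAMIC set, a consumer
 * would typically resolve the library at run time with dlopen(3) using
 * LIBFETCH_SONAME from the block above. load_libfetch() is a
 * hypothetical helper, not a symbol from this tree.
 */
#if 0 /* illustrative only, never compiled */
#include <dlfcn.h>
static void *
load_libfetch(void)
{
	/* LIBFETCH_SONAME is "libfetch.so.6" in the block above. */
	return (dlopen(LIBFETCH_SONAME, RTLD_LAZY | RTLD_LOCAL));
}
#endif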
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
/* using complete_and_exit() instead */
/* #undef SPL_KTHREAD_COMPLETE_AND_EXIT */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* pde_data() is PDE_DATA() */
/* #undef SPL_PDE_DATA */
/* Define to 1 if all of the C90 standard headers exist (not just the ones
   required in a freestanding environment). This macro is provided for
   backward compatibility; new code need not use it. */
#define STDC_HEADERS 1
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* Version number of package */
#define VERSION "2.2.0"
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* GENHD_FL_EXT_DEVT flag is not available */
/* #undef ZFS_GENHD_FL_EXT_DEVT */
/* GENHD_FL_NO_PART_SCAN flag is available */
/* #undef ZFS_GENHD_FL_NO_PART */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.2.0-FreeBSD_g009d3288"
+#define ZFS_META_ALIAS "zfs-2.2.0-FreeBSD_g32949f256"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "6.3"
+#define ZFS_META_KVER_MAX "6.4"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_g009d3288"
+#define ZFS_META_RELEASE "FreeBSD_g32949f256"
/* Define the project version. */
#define ZFS_META_VERSION "2.2.0"
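/*
 * Worked example, assumption only: ZFS_META_ALIAS above appears to be
 * the concatenation NAME "-" VERSION "-" RELEASE, since
 * "zfs" "-" "2.2.0" "-" "FreeBSD_g32949f256" yields
 * "zfs-2.2.0-FreeBSD_g32949f256". zfs_alias_check is a hypothetical name.
 */
#if 0 /* illustrative only, never compiled */
static const char zfs_alias_check[] =
	ZFS_META_NAME "-" ZFS_META_VERSION "-" ZFS_META_RELEASE;
#endif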
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index afe3b29c865b..71be2d813a0a 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.2.0-rc1-0-g009d3288d"
+#define ZFS_META_GITREV "zfs-2.2.0-rc3-31-g32949f256"
